diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 2da85f389..6ea685d8f 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -19,7 +19,7 @@ Fixes # I have: - [ ] Read and followed Crossplane's [contribution process]. -- [ ] Run `make reviewable` to ensure this PR is ready for review. +- [ ] Run `earthly +reviewable` to ensure this PR is ready for review. - [ ] Added or updated unit tests. - [ ] Added or updated e2e tests. - [ ] Linked a PR or a [docs tracking issue] to [document this change]. @@ -27,7 +27,7 @@ I have: Need help with this checklist? See the [cheat sheet]. -[contribution process]: https://github.com/crossplane/crossplane/tree/master/contributing +[contribution process]: https://github.com/crossplane/crossplane/tree/main/contributing [docs tracking issue]: https://github.com/crossplane/docs/issues/new [document this change]: https://docs.crossplane.io/contribute/contribute -[cheat sheet]: https://github.com/crossplane/crossplane/tree/master/contributing#checklist-cheat-sheet +[cheat sheet]: https://github.com/crossplane/crossplane/tree/main/contributing#checklist-cheat-sheet diff --git a/.github/renovate-entrypoint.sh b/.github/renovate-entrypoint.sh new file mode 100755 index 000000000..4ef46f961 --- /dev/null +++ b/.github/renovate-entrypoint.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +curl -fsSLo /usr/local/bin/earthly https://github.com/earthly/earthly/releases/latest/download/earthly-linux-amd64 +chmod +x /usr/local/bin/earthly +/usr/local/bin/earthly bootstrap + +renovate diff --git a/.github/renovate.json5 b/.github/renovate.json5 index a85cfa113..407bc82ac 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -5,93 +5,275 @@ "helpers:pinGitHubActionDigests", ":semanticCommits" ], -// We only want renovate to rebase PRs when they have conflicts, -// default "auto" mode is not required. 
+ // We only want renovate to rebase PRs when they have conflicts, default + // "auto" mode is not required. "rebaseWhen": "conflicted", -// The maximum number of PRs to be created in parallel + // The maximum number of PRs to be created in parallel "prConcurrentLimit": 5, -// The branches renovate should target -// PLEASE UPDATE THIS WHEN RELEASING. - "baseBranches": ["master","release-1.12","release-1.13","release-1.14"], + // The branches renovate should target + // PLEASE UPDATE THIS WHEN RELEASING. + "baseBranches": [ + "main", + "release-1.15", + "release-1.16", + "release-1.17" + ], "ignorePaths": [ "design/**", // We test upgrades, so leave it on an older version on purpose. "test/e2e/manifests/pkg/provider/provider-initial.yaml", ], - "postUpdateOptions": ["gomodTidy"], -// All PRs should have a label - "labels": ["automated"], + "postUpdateOptions": [ + "gomodTidy" + ], + // All PRs should have a label + "labels": [ + "automated" + ], "customManagers": [ { "customType": "regex", - "description": "Bump Go version used in workflows", - "fileMatch": ["^\\.github\\/workflows\\/[^/]+\\.ya?ml$"], + "description": "Bump Earthly version in GitHub workflows", + "fileMatch": [ + "^\\.github\\/workflows\\/[^/]+\\.ya?ml$" + ], + "matchStrings": [ + "EARTHLY_VERSION: '(?.*?)'\\n" + ], + "datasourceTemplate": "github-releases", + "depNameTemplate": "earthly/earthly", + "extractVersionTemplate": "^v(?.*)$" + }, + { + "customType": "regex", + "description": "Bump Go version in Earthfile", + "fileMatch": [ + "^Earthfile$" + ], "matchStrings": [ - "GO_VERSION: '(?.*?)'\\n" + "ARG --global GO_VERSION=(?.*?)\\n" ], "datasourceTemplate": "golang-version", "depNameTemplate": "golang" - }, { + }, + { "customType": "regex", - "description": "Bump golangci-lint version in workflows and the Makefile", - "fileMatch": ["^\\.github\\/workflows\\/[^/]+\\.ya?ml$","^Makefile$"], + "description": "Bump golangci-lint version in the Earthfile", + "fileMatch": [ + "^Earthfile$" + ], 
"matchStrings": [ - "GOLANGCI_VERSION: 'v(?.*?)'\\n", - "GOLANGCILINT_VERSION = (?.*?)\\n" + "ARG GOLANGCI_LINT_VERSION=(?.*?)\\n" ], - "datasourceTemplate": "github-tags", - "depNameTemplate": "golangci/golangci-lint", - "extractVersionTemplate": "^v(?.*)$" - }, { + "datasourceTemplate": "github-releases", + "depNameTemplate": "golangci/golangci-lint" + }, + { "customType": "regex", - "description": "Bump helm version in the Makefile", - "fileMatch": ["^Makefile$"], + "description": "Bump helm version in the Earthfile", + "fileMatch": [ + "^Earthfile$" + ], "matchStrings": [ - "HELM3_VERSION = (?.*?)\\n" + "ARG HELM_VERSION=(?.*?)\\n" ], - "datasourceTemplate": "github-tags", + "datasourceTemplate": "github-releases", "depNameTemplate": "helm/helm", - }, { + }, + { "customType": "regex", - "description": "Bump kind version in the Makefile", - "fileMatch": ["^Makefile$"], + "description": "Bump helm-docs version in the Earthfile", + "fileMatch": [ + "^Earthfile$" + ], "matchStrings": [ - "KIND_VERSION = (?.*?)\\n" + "ARG HELM_DOCS_VERSION=(?.*?)\\n" ], - "datasourceTemplate": "github-tags", + "datasourceTemplate": "github-releases", + "depNameTemplate": "norwoodj/helm-docs", + "extractVersionTemplate": "^v(?.*)$" + }, + { + "customType": "regex", + "description": "Bump kind version in the Earthfile", + "fileMatch": [ + "^Earthfile$" + ], + "matchStrings": [ + "ARG KIND_VERSION=(?.*?)\\n" + ], + "datasourceTemplate": "github-releases", "depNameTemplate": "kubernetes-sigs/kind", - } + }, + { + "customType": "regex", + "description": "Bump kubectl version in the Earthfile", + "fileMatch": [ + "^Earthfile$" + ], + "matchStrings": [ + "ARG KUBECTL_VERSION=(?.*?)\\n" + ], + "datasourceTemplate": "github-releases", + "depNameTemplate": "kubernetes/kubernetes", + }, + { + "customType": "regex", + "description": "Bump gotestsum version in the Earthfile", + "fileMatch": [ + "^Earthfile$" + ], + "matchStrings": [ + "ARG GOTESTSUM_VERSION=(?.*?)\\n" + ], + 
"datasourceTemplate": "github-releases", + "depNameTemplate": "gotestyourself/gotestsum", + "extractVersionTemplate": "^v(?.*)$" + }, + { + "customType": "regex", + "description": "Bump codeql version in the Earthfile", + "fileMatch": [ + "^Earthfile$" + ], + "matchStrings": [ + "ARG CODEQL_VERSION=(?.*?)\\n" + ], + "datasourceTemplate": "github-releases", + "depNameTemplate": "github/codeql-action", + "extractVersionTemplate": "^codeql-bundle-(?.*)$" + }, ], + // Renovate doesn't have native Earthfile support, but because Earthfile + // syntax is a superset of Dockerfile syntax this works to update FROM images. + // https://github.com/renovatebot/renovate/issues/15975 + "dockerfile": { + "fileMatch": [ + "(^|/)Earthfile$" + ] + }, "crossplane": { - "fileMatch": ["(^|/)test/e2e/.*\\.ya?ml$"] + "fileMatch": [ + "(^|/)test/e2e/.*\\.ya?ml$" + ] }, -// PackageRules disabled below should be enabled in case of vulnerabilities + // PackageRules disabled below should be enabled in case of vulnerabilities "vulnerabilityAlerts": { "enabled": true }, "osvVulnerabilityAlerts": true, -// Renovate evaluates all packageRules in order, so low priority rules should -// be at the beginning, high priority at the end + // Renovate evaluates all packageRules in order, so low priority rules should + // be at the beginning, high priority at the end "packageRules": [ { - "matchManagers": ["crossplane"], - "matchFileNames": ["test/e2e/**"], + "description": "Generate code after upgrading go dependencies (main)", + "matchDatasources": [ + "go" + ], + // Currently we only have an Earthfile on main. + matchBaseBranches: ["main"], + postUpgradeTasks: { + // Post-upgrade tasks that are executed before a commit is made by Renovate. 
+ "commands": [ + "earthly --strict +go-generate", + ], + fileFilters: [ + "**/*" + ], + executionMode: "update", + }, + }, + { + "description": "Generate code after upgrading go dependencies (release branch)", + "matchDatasources": [ + "go" + ], + // Currently we only have an Earthfile on main. + matchBaseBranches: ["release-.+"], + postUpgradeTasks: { + // Post-upgrade tasks that are executed before a commit is made by Renovate. + "commands": [ + "make go.generate", + ], + fileFilters: [ + "**/*" + ], + executionMode: "update", + }, + }, + { + "description": "Lint code after upgrading golangci-lint (main)", + "matchDepNames": [ + "golangci/golangci-lint" + ], + // Currently we only have an Earthfile on main. + matchBaseBranches: ["main"], + postUpgradeTasks: { + // Post-upgrade tasks that are executed before a commit is made by Renovate. + "commands": [ + "earthly --strict +go-lint", + ], + fileFilters: [ + "**/*" + ], + executionMode: "update", + }, + }, + { + "description": "Lint code after upgrading golangci-lint (release branch)", + "matchDepNames": [ + "golangci/golangci-lint" + ], + // Currently we only have an Earthfile on main. + matchBaseBranches: ["release-.+"], + postUpgradeTasks: { + // Post-upgrade tasks that are executed before a commit is made by Renovate. 
+ "commands": [ + "make go.lint", + ], + fileFilters: [ + "**/*" + ], + executionMode: "update", + }, + }, + { + "matchManagers": [ + "crossplane" + ], + "matchFileNames": [ + "test/e2e/**" + ], "groupName": "e2e-manifests", - }, { + }, + { "description": "Ignore non-security related updates to release branches", - matchBaseBranches: [ "/^release-.*/"], + matchBaseBranches: [ + "/^release-.*/" + ], enabled: false, - }, { + }, + { "description": "Still update Docker images on release branches though", - "matchDatasources": ["docker"], - matchBaseBranches: [ "/^release-.*/"], + "matchDatasources": [ + "docker" + ], + matchBaseBranches: [ + "/^release-.*/" + ], enabled: true, - }, { + }, + { "description": "Only get Docker image updates every 2 weeks to reduce noise", - "matchDatasources": ["docker"], - "schedule": ["every 2 week on monday"], + "matchDatasources": [ + "docker" + ], + "schedule": [ + "every 2 week on monday" + ], enabled: true, - }, { + }, + { "description": "Ignore k8s.io/client-go older versions, they switched to semantic version and old tags are still available in the repo", "matchDatasources": [ "go" @@ -100,7 +282,8 @@ "k8s.io/client-go" ], "allowedVersions": "<1.0", - }, { + }, + { "description": "Ignore k8s dependencies, should be updated on crossplane-runtime", "matchDatasources": [ "go" @@ -110,17 +293,23 @@ "sigs.k8s.io" ], "enabled": false, - },{ + }, + { "description": "Only get dependency digest updates every month to reduce noise, except crossplane-runtime", - "excludePackageNames": ["github.com/crossplane/crossplane-runtime"], + "excludePackageNames": [ + "github.com/crossplane/crossplane-runtime" + ], "matchDatasources": [ "go" ], "matchUpdateTypes": [ "digest", ], - "extends": ["schedule:monthly"], - }, { + "extends": [ + "schedule:monthly" + ], + }, + { "description": "Ignore oss-fuzz, it's not using tags, we'll stick to master", "matchDepTypes": [ "action" @@ -129,6 +318,13 @@ "google/oss-fuzz" ], "enabled": false + }, + { + 
"description": "Group all go version updates", + "matchDatasources": [ + "golang-version" + ], + "groupName": "golang version", } - ] + ], } diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 7f311959e..b2b579161 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -22,12 +22,12 @@ jobs: if: github.event.pull_request.merged steps: - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 with: fetch-depth: 0 - name: Open Backport PR - uses: zeebe-io/backport-action@e8161d6a0dbfa2651b7daa76cbb75bc7c925bbf3 # v2.4.1 + uses: zeebe-io/backport-action@ef20d86abccbac3ee3a73cb2efbdc06344c390e5 # v2.5.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} github_workspace: ${{ github.workspace }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1c0945fb5..8db651749 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -10,16 +10,17 @@ on: env: # Common versions - GO_VERSION: '1.22.0' - GOLANGCI_VERSION: 'v1.55.2' - DOCKER_BUILDX_VERSION: 'v0.10.0' + EARTHLY_VERSION: '0.8.13' + + # Force Earthly to use color output + FORCE_COLOR: "1" # Common users. We can't run a step 'if secrets.DOCKER_USR != ""' but we can run # a step 'if env.DOCKER_USR' != ""', so we copy these to succinctly test whether # credentials have been provided before trying to run steps that need them. 
DOCKER_USR: ${{ secrets.DOCKER_USR }} UPBOUND_MARKETPLACE_PUSH_ROBOT_USR: ${{ secrets.UPBOUND_MARKETPLACE_PUSH_ROBOT_USR }} - XPKG_ACCESS_ID: ${{ secrets.XPKG_ACCESS_ID }} + jobs: check-diff: @@ -27,217 +28,192 @@ jobs: steps: - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 - with: - submodules: true + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - - name: Setup Go - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5 + - name: Setup Earthly + uses: earthly/actions-setup@v1 with: - go-version: ${{ env.GO_VERSION }} + github-token: ${{ secrets.GITHUB_TOKEN }} + version: ${{ env.EARTHLY_VERSION }} - - name: Find the Go Build Cache - id: go - run: echo "cache=$(make go.cachedir)" >> $GITHUB_OUTPUT - - - name: Cache the Go Build Cache - uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4 + - name: Login to DockerHub + uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3 + if: env.DOCKER_USR != '' with: - path: ${{ steps.go.outputs.cache }} - key: ${{ runner.os }}-build-check-diff-${{ hashFiles('**/go.sum') }} - restore-keys: ${{ runner.os }}-build-check-diff- + username: ${{ secrets.DOCKER_USR }} + password: ${{ secrets.DOCKER_PSW }} - - name: Cache Go Dependencies - uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4 + - name: Login to GitHub Container Registry + uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3 with: - path: .work/pkg - key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }} - restore-keys: ${{ runner.os }}-pkg- + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} - - name: Vendor Dependencies - run: make vendor vendor.check + - name: Configure Earthly to Push Cache to GitHub Container Registry + if: github.ref == 'refs/heads/main' + run: | + echo "EARTHLY_PUSH=true" >> $GITHUB_ENV + echo "EARTHLY_MAX_REMOTE_CACHE=true" >> $GITHUB_ENV - - name: Check Diff - run: 
make check-diff + - name: Generate Files + run: earthly --strict --remote-cache ghcr.io/upbound/crossplane-earthly-cache:${{ github.job }} +generate - detect-noop: - runs-on: ubuntu-22.04 - outputs: - noop: ${{ steps.noop.outputs.should_skip }} - steps: - - name: Detect No-op Changes - id: noop - uses: fkirc/skip-duplicate-actions@f75f66ce1886f00957d99748a42c724f4330bdcf # v5.3.1 + - name: Count Changed Files + id: changed_files + run: echo "count=$(git status --porcelain | wc -l)" >> $GITHUB_OUTPUT + + - name: Fail if Files Changed + if: steps.changed_files.outputs.count != 0 + uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7 with: - github_token: ${{ secrets.GITHUB_TOKEN }} - paths_ignore: '["**.md", "**.png", "**.jpg"]' - do_not_skip: '["workflow_dispatch", "schedule", "push"]' - concurrent_skipping: false + script: core.setFailed('Found changed files after running earthly +generate.') lint: runs-on: ubuntu-22.04 - needs: detect-noop - if: needs.detect-noop.outputs.noop != 'true' steps: - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 - with: - submodules: true + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - - name: Setup Go - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5 + - name: Setup Earthly + uses: earthly/actions-setup@v1 with: - go-version: ${{ env.GO_VERSION }} - - - name: Find the Go Build Cache - id: go - run: echo "cache=$(make go.cachedir)" >> $GITHUB_OUTPUT + github-token: ${{ secrets.GITHUB_TOKEN }} + version: ${{ env.EARTHLY_VERSION }} - - name: Cache the Go Build Cache - uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4 + - name: Login to DockerHub + uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3 + if: env.DOCKER_USR != '' with: - path: ${{ steps.go.outputs.cache }} - key: ${{ runner.os }}-build-lint-${{ hashFiles('**/go.sum') }} - restore-keys: ${{ runner.os }}-build-lint- + username: ${{ 
secrets.DOCKER_USR }} + password: ${{ secrets.DOCKER_PSW }} - - name: Cache Go Dependencies - uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4 + - name: Login to GitHub Container Registry + uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3 with: - path: .work/pkg - key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }} - restore-keys: ${{ runner.os }}-pkg- + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} - - name: Vendor Dependencies - run: make vendor vendor.check + - name: Configure Earthly to Push Cache to GitHub Container Registry + if: github.ref == 'refs/heads/main' + run: | + echo "EARTHLY_PUSH=true" >> $GITHUB_ENV + echo "EARTHLY_MAX_REMOTE_CACHE=true" >> $GITHUB_ENV - # We could run 'make lint' to ensure our desired Go version, but we prefer - # this action because it leaves 'annotations' (i.e. it comments on PRs to - # point out linter violations). - name: Lint - uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # v3 - with: - version: ${{ env.GOLANGCI_VERSION }} - skip-cache: true # We do our own caching. 
+ run: earthly --strict --remote-cache ghcr.io/upbound/crossplane-earthly-cache:${{ github.job }} +lint codeql: runs-on: ubuntu-22.04 - needs: detect-noop - if: needs.detect-noop.outputs.noop != 'true' steps: - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 - with: - submodules: true + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - - name: Setup Go - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5 + - name: Setup Earthly + uses: earthly/actions-setup@v1 with: - go-version: ${{ env.GO_VERSION }} - - - name: Find the Go Build Cache - id: go - run: echo "cache=$(make go.cachedir)" >> $GITHUB_OUTPUT + github-token: ${{ secrets.GITHUB_TOKEN }} + version: ${{ env.EARTHLY_VERSION }} - - name: Cache the Go Build Cache - uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4 + - name: Login to DockerHub + uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3 + if: env.DOCKER_USR != '' with: - path: ${{ steps.go.outputs.cache }} - key: ${{ runner.os }}-build-check-diff-${{ hashFiles('**/go.sum') }} - restore-keys: ${{ runner.os }}-build-check-diff- + username: ${{ secrets.DOCKER_USR }} + password: ${{ secrets.DOCKER_PSW }} - - name: Cache Go Dependencies - uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4 + - name: Login to GitHub Container Registry + uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3 with: - path: .work/pkg - key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }} - restore-keys: ${{ runner.os }}-pkg- + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Configure Earthly to Push Cache to GitHub Container Registry + if: github.ref == 'refs/heads/main' + run: | + echo "EARTHLY_PUSH=true" >> $GITHUB_ENV + echo "EARTHLY_MAX_REMOTE_CACHE=true" >> $GITHUB_ENV - - name: Vendor Dependencies - run: make vendor vendor.check + - name: Run CodeQL + run: earthly --strict 
--remote-cache ghcr.io/upbound/crossplane-earthly-cache:${{ github.job }} +ci-codeql - - name: Initialize CodeQL - uses: github/codeql-action/init@e8893c57a1f3a2b659b6b55564fdfdbbd2982911 # v3 + - name: Upload CodeQL Results to GitHub + uses: github/codeql-action/upload-sarif@f079b8493333aace61c81488f8bd40919487bd9f # v3 with: - languages: go + sarif_file: '_output/codeql/go.sarif' - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@e8893c57a1f3a2b659b6b55564fdfdbbd2982911 # v3 trivy-scan-fs: runs-on: ubuntu-22.04 - needs: detect-noop - if: needs.detect-noop.outputs.noop != 'true' steps: - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 - with: - submodules: true + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - name: Run Trivy vulnerability scanner in fs mode - uses: aquasecurity/trivy-action@84384bd6e777ef152729993b8145ea352e9dd3ef # 0.17.0 + uses: aquasecurity/trivy-action@fd25fed6972e341ff0007ddb61f77e88103953c2 # 0.21.0 with: scan-type: 'fs' ignore-unfixed: true skip-dirs: design scan-ref: '.' 
- exit-code: '1' severity: 'CRITICAL,HIGH' + format: sarif + output: 'trivy-results.sarif' + + - name: Upload Trivy Results to GitHub + uses: github/codeql-action/upload-sarif@f079b8493333aace61c81488f8bd40919487bd9f # v3 + with: + sarif_file: 'trivy-results.sarif' unit-tests: runs-on: ubuntu-22.04 - needs: detect-noop - if: needs.detect-noop.outputs.noop != 'true' steps: - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 - with: - submodules: true - - - name: Fetch History - run: git fetch --prune --unshallow + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - - name: Setup Go - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5 + - name: Setup Earthly + uses: earthly/actions-setup@v1 with: - go-version: ${{ env.GO_VERSION }} + github-token: ${{ secrets.GITHUB_TOKEN }} + version: ${{ env.EARTHLY_VERSION }} - - name: Find the Go Build Cache - id: go - run: echo "cache=$(make go.cachedir)" >> $GITHUB_OUTPUT - - - name: Cache the Go Build Cache - uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4 + - name: Login to DockerHub + uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3 + if: env.DOCKER_USR != '' with: - path: ${{ steps.go.outputs.cache }} - key: ${{ runner.os }}-build-unit-tests-${{ hashFiles('**/go.sum') }} - restore-keys: ${{ runner.os }}-build-unit-tests- + username: ${{ secrets.DOCKER_USR }} + password: ${{ secrets.DOCKER_PSW }} - - name: Cache Go Dependencies - uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4 + - name: Login to GitHub Container Registry + uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3 with: - path: .work/pkg - key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }} - restore-keys: ${{ runner.os }}-pkg- + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} - - name: Vendor Dependencies - run: make vendor vendor.check + - name: Configure Earthly to 
Push Cache to GitHub Container Registry + if: github.ref == 'refs/heads/main' + run: | + echo "EARTHLY_PUSH=true" >> $GITHUB_ENV + echo "EARTHLY_MAX_REMOTE_CACHE=true" >> $GITHUB_ENV - name: Run Unit Tests - run: make -j2 test + run: earthly --strict --remote-cache ghcr.io/upbound/crossplane-earthly-cache:${{ github.job }} +test - name: Publish Unit Test Coverage - uses: codecov/codecov-action@ab904c41d6ece82784817410c45d8b8c02684457 # v3 + uses: codecov/codecov-action@125fc84a9a348dbcf27191600683ec096ec9021c # v4 with: flags: unittests - file: _output/tests/linux_amd64/coverage.txt + file: _output/tests/coverage.txt + token: ${{ secrets.CODECOV_TOKEN }} e2e-tests: runs-on: ubuntu-22.04 - needs: detect-noop - if: needs.detect-noop.outputs.noop != 'true' strategy: fail-fast: false matrix: @@ -246,77 +222,55 @@ jobs: - environment-configs - usage - ssa-claims + - realtime-compositions steps: - - name: Setup QEMU - uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3 - with: - platforms: all - - - name: Setup Docker Buildx - uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3 - with: - version: ${{ env.DOCKER_BUILDX_VERSION }} - install: true - - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 - with: - submodules: true + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - - name: Fetch History - run: git fetch --prune --unshallow - - - name: Setup Go - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5 + - name: Setup Earthly + uses: earthly/actions-setup@v1 with: - go-version: ${{ env.GO_VERSION }} - - - name: Find the Go Build Cache - id: go - run: echo "cache=$(make go.cachedir)" >> $GITHUB_OUTPUT + github-token: ${{ secrets.GITHUB_TOKEN }} + version: ${{ env.EARTHLY_VERSION }} - - name: Cache the Go Build Cache - uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4 + - name: Login to DockerHub + uses: 
docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3 + if: env.DOCKER_USR != '' with: - path: ${{ steps.go.outputs.cache }} - key: ${{ runner.os }}-build-e2e-tests-${{ hashFiles('**/go.sum') }} - restore-keys: ${{ runner.os }}-build-e2e-tests- + username: ${{ secrets.DOCKER_USR }} + password: ${{ secrets.DOCKER_PSW }} - - name: Cache Go Dependencies - uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4 + - name: Login to GitHub Container Registry + uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3 with: - path: .work/pkg - key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-pkg- - - - name: Vendor Dependencies - run: make vendor vendor.check + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} - - name: Build Helm Chart - run: make -j2 build - env: - # We're using docker buildx, which doesn't actually load the images it - # builds by default. Specifying --load does so. 
- BUILD_ARGS: "--load" + - name: Configure Earthly to Push Cache to GitHub Container Registry + if: github.ref == 'refs/heads/main' + run: | + echo "EARTHLY_PUSH=true" >> $GITHUB_ENV + echo "EARTHLY_MAX_REMOTE_CACHE=true" >> $GITHUB_ENV - name: Run E2E Tests - run: make e2e E2E_TEST_FLAGS="-test.v -test.failfast -fail-fast --kind-logs-location ./logs-kind --test-suite ${{ matrix.test-suite }}" + run: | + earthly --strict --allow-privileged --remote-cache ghcr.io/upbound/crossplane-earthly-cache:${{ github.job }}-${{ matrix.test-suite}} \ + +e2e --FLAGS="-test.failfast -fail-fast --test-suite ${{ matrix.test-suite }}" - - name: Upload artifacts - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4 - if: failure() + - name: Publish E2E Test Flakes + if: '!cancelled()' + uses: buildpulse/buildpulse-action@d0d30f53585cf16b2e01811a5a753fd47968654a # v0.11.0 with: - name: e2e-kind-logs-${{ matrix.test-suite }} - path: ./logs-kind - if-no-files-found: error - retention-days: 7 + account: 45158470 + repository: 147886080 + key: ${{ secrets.BUILDPULSE_ACCESS_KEY_ID }} + secret: ${{ secrets.BUILDPULSE_SECRET_ACCESS_KEY }} + path: _output/tests/e2e-tests.xml publish-artifacts: runs-on: ubuntu-22.04 - needs: detect-noop - if: needs.detect-noop.outputs.noop != 'true' steps: - name: Cleanup Disk @@ -326,117 +280,68 @@ jobs: dotnet: true haskell: true tool-cache: true - large-packages: false swap-storage: false - - - name: Setup QEMU - uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3 - with: - platforms: all - - - name: Setup Docker Buildx - uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3 - with: - version: ${{ env.DOCKER_BUILDX_VERSION }} - install: true + # This works, and saves ~5GiB, but takes ~2 minutes to do it. + large-packages: false + # TODO(negz): Does having these around avoid Earthly needing to pull + # large images like golang? 
+ docker-images: false - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 - with: - submodules: true - - - name: Fetch History - run: git fetch --prune --unshallow - - - name: Setup Go - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5 - with: - go-version: ${{ env.GO_VERSION }} - - - name: Find the Go Build Cache - id: go - run: echo "cache=$(make go.cachedir)" >> $GITHUB_OUTPUT - - - name: Cache the Go Build Cache - uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4 - with: - path: ${{ steps.go.outputs.cache }} - key: ${{ runner.os }}-build-publish-artifacts-${{ hashFiles('**/go.sum') }} - restore-keys: ${{ runner.os }}-build-publish-artifacts- - - - name: Cache Go Dependencies - uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 with: - path: .work/pkg - key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }} - restore-keys: ${{ runner.os }}-pkg- - - - name: Vendor Dependencies - run: make vendor vendor.check - - - name: Build Artifacts - run: make -j2 build.all - env: - # We're using docker buildx, which doesn't actually load the images it - # builds by default. Specifying --load does so. 
- BUILD_ARGS: "--load" + fetch-depth: 0 - - name: Publish Artifacts to GitHub - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4 + - name: Setup Earthly + uses: earthly/actions-setup@v1 with: - name: output - path: _output/** + github-token: ${{ secrets.GITHUB_TOKEN }} + version: ${{ env.EARTHLY_VERSION }} - name: Login to DockerHub - uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3 + uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3 if: env.DOCKER_USR != '' with: username: ${{ secrets.DOCKER_USR }} password: ${{ secrets.DOCKER_PSW }} - name: Login to Upbound - uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3 + uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3 if: env.UPBOUND_MARKETPLACE_PUSH_ROBOT_USR != '' with: - registry: xpkg.upbound.io/upbound + registry: xpkg.upbound.io username: ${{ secrets.UPBOUND_MARKETPLACE_PUSH_ROBOT_USR }} password: ${{ secrets.UPBOUND_MARKETPLACE_PUSH_ROBOT_PSW }} - - name: Publish Artifacts to Marketplace, DockerHub - run: make -j2 publish BRANCH_NAME=${GITHUB_REF##*/} - - - name: Promote Artifacts in DockerHub - if: github.ref == 'refs/heads/main' && env.DOCKER_USR != '' - run: make -j2 promote - env: - BRANCH_NAME: main - CHANNEL: main - - - name: Login to Spaces Artifacts Registry - uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3 - if: env.XPKG_ACCESS_ID != '' - with: - registry: xpkg.upbound.io/spaces-artifacts - username: ${{ secrets.XPKG_ACCESS_ID }} - password: ${{ secrets.XPKG_TOKEN }} - - - name: Publish Artifacts to Spaces Artifacts Registry - run: make -j2 publish BRANCH_NAME=${GITHUB_REF##*/} - env: - REGISTRY_ORGS: xpkg.upbound.io/spaces-artifacts - - - name: Promote Artifacts in Spaces Artifacts Registry - if: github.ref == 'refs/heads/main' && env.DOCKER_USR != '' - run: make -j2 promote - env: - REGISTRY_ORGS: xpkg.upbound.io/spaces-artifacts - BRANCH_NAME: main - CHANNEL: main + 
- name: Login to GitHub Container Registry + uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Configure Earthly to Push Cache to GitHub Container Registry + if: github.ref == 'refs/heads/main' + run: echo "EARTHLY_MAX_REMOTE_CACHE=true" >> $GITHUB_ENV + + - name: Configure Earthly to Push Artifacts + if: (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release-')) && env.DOCKER_USR != '' && env.UPBOUND_MARKETPLACE_PUSH_ROBOT_USR != '' + run: echo "EARTHLY_PUSH=true" >> $GITHUB_ENV + + - name: Set CROSSPLANE_VERSION GitHub Environment Variable + run: earthly +ci-version + + - name: Build and Push Artifacts + run: earthly --strict --remote-cache ghcr.io/upbound/crossplane-earthly-cache:${{ github.job }} +ci-artifacts --CROSSPLANE_VERSION=${CROSSPLANE_VERSION} + + - name: Upload Artifacts to GitHub + uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4 + with: + name: output + path: _output/** fuzz-test: runs-on: ubuntu-22.04 - needs: detect-noop - if: needs.detect-noop.outputs.noop != 'true' steps: # TODO(negz): Can we make this use our Go build and dependency cache? It @@ -456,49 +361,41 @@ jobs: language: go - name: Upload Crash - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4 + uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4 if: failure() && steps.build.outcome == 'success' with: name: artifacts path: ./out/artifacts - # TODO(negz): Refactor this job. Should the parts pertaining to release - # branches live in promote.yaml instead? 
protobuf-schemas: runs-on: ubuntu-22.04 - needs: detect-noop - if: needs.detect-noop.outputs.noop != 'true' steps: - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - name: Setup Buf uses: bufbuild/buf-setup-action@v1 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} - name: Lint Protocol Buffers uses: bufbuild/buf-lint-action@v1 with: input: apis - - name: Detect Breaking Changes in Protocol Buffers (Main Branch) + # buf-breaking-action doesn't support branches + # https://github.com/bufbuild/buf-push-action/issues/34 + - name: Detect Breaking Changes in Protocol Buffers uses: bufbuild/buf-breaking-action@a074e988ee34efcd4927079e79c611f428354c01 # v1 - # We want to run this for the main branch, and PRs. - if: ${{ ! startsWith(github.ref, 'refs/heads/release-') }} + # We want to run this for the main branch, and PRs against main. + if: ${{ github.ref == 'refs/heads/main' || github.base_ref == 'main' }} with: input: apis against: "https://github.com/${GITHUB_REPOSITORY}.git#branch=main,subdir=apis" - - name: Detect Breaking Changes in Protocol Buffers (Release Branch) - uses: bufbuild/buf-breaking-action@a074e988ee34efcd4927079e79c611f428354c01 # v1 - # We want to run this only on release branches. 
- if: ${{ startsWith(github.ref, 'refs/heads/release-') }} - with: - input: apis - against: "https://github.com/${GITHUB_REPOSITORY}.git#branch=${GITHUB_REF_NAME},subdir=apis" - - name: Push Protocol Buffers to Buf Schema Registry - if: ${{ github.repository == 'crossplane/crossplane' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release-')) }} + if: ${{ github.repository == 'crossplane/crossplane' && github.ref == 'refs/heads/main' }} uses: bufbuild/buf-push-action@v1 with: input: apis diff --git a/.github/workflows/commands.yml b/.github/workflows/commands.yml index 6ab39cf8c..bbb7bbf91 100644 --- a/.github/workflows/commands.yml +++ b/.github/workflows/commands.yml @@ -21,13 +21,34 @@ jobs: permission-level: write - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 with: fetch-depth: 0 - name: Open Backport PR - uses: zeebe-io/backport-action@e8161d6a0dbfa2651b7daa76cbb75bc7c925bbf3 # v2.4.1 + uses: zeebe-io/backport-action@ef20d86abccbac3ee3a73cb2efbdc06344c390e5 # v2.5.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} github_workspace: ${{ github.workspace }} version: v0.0.4 + + fresh: + runs-on: ubuntu-22.04 + if: startsWith(github.event.comment.body, '/fresh') + + steps: + - name: Extract Command + id: command + uses: xt0rted/slash-command-action@bf51f8f5f4ea3d58abc7eca58f77104182b23e88 # v2 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + command: fresh + reaction: "true" + reaction-type: "eyes" + allow-edits: "false" + permission-level: read + - name: Handle Command + uses: actions-ecosystem/action-remove-labels@2ce5d41b4b6aa8503e285553f75ed56e0a40bae0 # v1 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + labels: stale diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml index 868a497af..ce7671be5 100644 --- a/.github/workflows/pr.yml +++ b/.github/workflows/pr.yml @@ -6,10 +6,10 @@ on: jobs: 
checklist-completed: - if: github.actor != 'renovate[bot]' + if: github.actor != 'crossplane-renovate[bot]' runs-on: ubuntu-22.04 steps: - uses: mheap/require-checklist-action@01fe24747f8630a056d9ca79dfbbb755579850ab # v2 with: # The checklist must _exist_ and be filled out. - requireChecklist: true \ No newline at end of file + requireChecklist: true diff --git a/.github/workflows/promote.yml b/.github/workflows/promote.yml index 23a19ba16..16d3ef5a6 100644 --- a/.github/workflows/promote.yml +++ b/.github/workflows/promote.yml @@ -12,7 +12,6 @@ on: default: 'stable' # Note: For pre-releases, we want to promote the pre-release version to # the (stable) channel, but not set it as the "current" version. - # See: https://github.com/upbound/build/pull/243 pre-release: type: boolean description: 'This is a pre-release' @@ -21,14 +20,13 @@ on: env: # Common versions - GO_VERSION: '1.22.0' + EARTHLY_VERSION: '0.8.13' # Common users. We can't run a step 'if secrets.AWS_USR != ""' but we can run # a step 'if env.AWS_USR' != ""', so we copy these to succinctly test whether # credentials have been provided before trying to run steps that need them. 
DOCKER_USR: ${{ secrets.DOCKER_USR }} UPBOUND_MARKETPLACE_PUSH_ROBOT_USR: ${{ secrets.UPBOUND_MARKETPLACE_PUSH_ROBOT_USR }} - XPKG_ACCESS_ID: ${{ secrets.XPKG_ACCESS_ID }} jobs: promote-artifacts: @@ -36,44 +34,28 @@ jobs: steps: - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 - with: - submodules: true + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - - name: Setup Go - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5 + - name: Setup Earthly + uses: earthly/actions-setup@v1 with: - go-version: ${{ env.GO_VERSION }} - - - name: Fetch History - run: git fetch --prune --unshallow + github-token: ${{ secrets.GITHUB_TOKEN }} + version: ${{ env.EARTHLY_VERSION }} - - name: Login to DockerHub - uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3 + - name: Promote Image to docker.io/upbound/crossplane:${{ inputs.channel }} if: env.DOCKER_USR != '' - with: - username: ${{ secrets.DOCKER_USR }} - password: ${{ secrets.DOCKER_PSW }} - - - name: Login to Upbound - uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3 + run: | + earthly --strict \ + --push \ + --secret DOCKER_USER=${{ secrets.DOCKER_USR }} \ + --secret DOCKER_PASSWORD=${{ secrets.DOCKER_PSW }} \ + +ci-promote-image --CHANNEL=${{ inputs.channel }} --CROSSPLANE_VERSION=${{ inputs.version }} --CROSSPLANE_REPO=docker.io/upbound/crossplane + + - name: Promote Image to xpkg.upbound.io/upbound/crossplane:${{ inputs.channel }} if: env.UPBOUND_MARKETPLACE_PUSH_ROBOT_USR != '' - with: - registry: xpkg.upbound.io/upbound - username: ${{ secrets.UPBOUND_MARKETPLACE_PUSH_ROBOT_USR }} - password: ${{ secrets.UPBOUND_MARKETPLACE_PUSH_ROBOT_PSW }} - - - name: Login to Spaces Artifacts Registry - uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3 - if: env.XPKG_ACCESS_ID != '' - with: - registry: xpkg.upbound.io/spaces-artifacts - username: ${{ secrets.XPKG_ACCESS_ID }} - password: ${{ 
secrets.XPKG_TOKEN }} - - - name: Promote Artifacts in DockerHub and Upbound Registry - if: env.DOCKER_USR != '' && env.UPBOUND_MARKETPLACE_PUSH_ROBOT_USR != '' - run: make -j2 promote BRANCH_NAME=${GITHUB_REF##*/} - env: - VERSION: ${{ github.event.inputs.version }} - CHANNEL: ${{ github.event.inputs.channel }} + run: | + earthly --strict \ + --push \ + --secret DOCKER_USER=${{ secrets.UPBOUND_MARKETPLACE_PUSH_ROBOT_USR }} \ + --secret DOCKER_PASSWORD=${{ secrets.UPBOUND_MARKETPLACE_PUSH_ROBOT_PSW }} \ + +ci-promote-image --CHANNEL=${{ inputs.channel }} --CROSSPLANE_VERSION=${{ inputs.version }} --CROSSPLANE_REPO=xpkg.upbound.io/upbound/crossplane diff --git a/.github/workflows/renovate.yml b/.github/workflows/renovate.yml new file mode 100644 index 000000000..799766699 --- /dev/null +++ b/.github/workflows/renovate.yml @@ -0,0 +1,54 @@ +name: Renovate +on: + # Allows manual/automated trigger for debugging purposes + workflow_dispatch: + inputs: + logLevel: + description: "Renovate's log level" + required: true + default: "info" + type: string + schedule: + - cron: '0 8 * * *' + +env: + # Common versions + EARTHLY_VERSION: '0.8.13' + + LOG_LEVEL: "info" + +jobs: + renovate: + runs-on: ubuntu-latest + if: | + !github.event.repository.fork && + !github.event.pull_request.head.repo.fork + steps: + - name: Checkout + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + + # Don't waste time starting Renovate if JSON is invalid + - name: Validate Renovate JSON + run: npx --yes --package renovate -- renovate-config-validator + + - name: Get token + id: get-github-app-token + uses: actions/create-github-app-token@a0de6af83968303c8c955486bf9739a57d23c7f1 # v1 + with: + app-id: ${{ secrets.RENOVATE_GITHUB_APP_ID }} + private-key: ${{ secrets.RENOVATE_GITHUB_APP_PRIVATE_KEY }} + + - name: Self-hosted Renovate + uses: renovatebot/github-action@063e0c946b9c1af35ef3450efc44114925d6e8e6 # v40.1.11 + env: + RENOVATE_REPOSITORIES: ${{ github.repository }} + # 
Use GitHub API to create commits + RENOVATE_PLATFORM_COMMIT: "true" + LOG_LEVEL: ${{ github.event.inputs.logLevel || env.LOG_LEVEL }} + RENOVATE_ALLOWED_POST_UPGRADE_COMMANDS: '["^earthly .+"]' + with: + configurationFile: .github/renovate.json5 + token: '${{ steps.get-github-app-token.outputs.token }}' + mount-docker-socket: true + docker-user: root + docker-cmd-file: .github/renovate-entrypoint.sh diff --git a/.github/workflows/scan.yaml b/.github/workflows/scan.yaml index c39490e9f..53237354d 100644 --- a/.github/workflows/scan.yaml +++ b/.github/workflows/scan.yaml @@ -17,7 +17,7 @@ jobs: supported_releases: ${{ steps.get-releases.outputs.supported_releases }} steps: - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 with: fetch-depth: 0 @@ -99,7 +99,7 @@ jobs: # we log to DockerHub to avoid rate limiting - name: Login To DockerHub - uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3 + uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3 if: env.DOCKER_USR != '' with: username: ${{ secrets.DOCKER_USR }} @@ -110,21 +110,21 @@ jobs: run: docker pull ${{ matrix.image }}:${{ env.tag }} - name: Run Trivy Vulnerability Scanner - uses: aquasecurity/trivy-action@84384bd6e777ef152729993b8145ea352e9dd3ef # 0.17.0 + uses: aquasecurity/trivy-action@fd25fed6972e341ff0007ddb61f77e88103953c2 # 0.21.0 with: image-ref: ${{ matrix.image }}:${{ env.tag }} format: 'sarif' output: 'trivy-results.sarif' - name: Upload Artifact - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4 + uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4 with: name: trivy-${{ env.escaped_filename }}.sarif path: trivy-results.sarif retention-days: 3 - name: Upload Trivy Scan Results To GitHub Security Tab - uses: github/codeql-action/upload-sarif@e8893c57a1f3a2b659b6b55564fdfdbbd2982911 # v3 + uses: 
github/codeql-action/upload-sarif@f079b8493333aace61c81488f8bd40919487bd9f # v3 with: sarif_file: 'trivy-results.sarif' category: ${{ matrix.image }}:${{ env.tag }} diff --git a/.github/workflows/tag.yml b/.github/workflows/tag.yml index 6f12ffd7c..9509a3e2a 100644 --- a/.github/workflows/tag.yml +++ b/.github/workflows/tag.yml @@ -16,7 +16,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - name: Create Tag uses: negz/create-tag@39bae1e0932567a58c20dea5a1a0d18358503320 # v1 diff --git a/.gitignore b/.gitignore index 8ef5e3bad..31d0f1988 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,6 @@ /.cache /.work +/.hack /_output /config/ /config diff --git a/.gitmodules b/.gitmodules index c2fad4707..e69de29bb 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +0,0 @@ -[submodule "build"] - path = build - url = https://github.com/upbound/build diff --git a/.golangci.yml b/.golangci.yml index 2cb328129..dc1aa045f 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,12 +1,106 @@ run: timeout: 10m - skip-files: - - "zz_generated\\..+\\.go$" - output: # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number" - format: colored-line-number + formats: + - format: colored-line-number + path: stderr + +linters: + enable-all: true + fast: false + + disable: + # These linters are all deprecated. We disable them explicitly to avoid the + # linter logging deprecation warnings. + - execinquery + + # These are linters we'd like to enable, but that will be labor intensive to + # make existing code compliant. + - wrapcheck + - varnamelen + - testpackage + - paralleltest + - nilnil + - gomnd + + # Below are linters that lint for things we don't value. Each entry below + # this line must have a comment explaining the rationale. + + # These linters add whitespace in an attempt to make code more readable. 
+ # This isn't a widely accepted Go best practice, and would be laborious to + # apply to existing code. + - wsl + - nlreturn + + # Warns about uses of fmt.Sprintf that are less performant than alternatives + # such as string concatenation. We value readability more than performance + # unless performance is measured to be an issue. + - perfsprint + + # This linter: + # + # 1. Requires errors.Is/errors.As to test equality. + # 2. Requires all errors be wrapped with fmt.Errorf specifically. + # 3. Disallows errors.New inline - requires package level errors. + # + # 1 is covered by other linters. 2 is covered by wrapcheck, which can also + # handle our use of crossplane-runtime's errors package. 3 is more strict + # than we need. Not every error needs to be tested for equality. + - err113 + + # These linters duplicate gocognit, but calculate complexity differently. + - gocyclo + - cyclop + - nestif + - funlen + - maintidx + + # Enforces max line length. It's not idiomatic to enforce a strict limit on + # line length in Go. We'd prefer to lint for things that often cause long + # lines, like functions with too many parameters or long parameter names + # that duplicate their types. + - lll + + # Warns about struct instantiations that don't specify every field. Could be + # useful in theory to catch fields that are accidentally omitted. Seems like + # it would have many more false positives than useful catches, though. + - exhaustruct + + # Warns about TODO comments. The rationale being they should be issues + # instead. We're okay with using TODO to track minor cleanups for next time + # we touch a particular file. + - godox + + # Warns about duplicated code blocks within the same file. Could be useful + # to prompt folks to think about whether code should be broken out into a + # function, but generally we're less worried about DRY and fine with a + # little copying. 
We don't want to give folks the impression that we require + # every duplicated code block to be factored out into a function. + - dupl + + # Warns about returning interfaces rather than concrete types. We do think + # it's best to avoid returning interfaces where possible. However, at the + # time of writing enabling this linter would only catch the (many) cases + # where we must return an interface. + - ireturn + + # Warns about returning named variables. We do think it's best to avoid + # returning named variables where possible. However, at the time of writing + # enabling this linter would only catch the (many) cases where returning + # named variables is useful to document what the variables are. For example + # we believe it makes sense to return (ready bool) rather than just (bool) + # to communicate what the bool means. + - nonamedreturns + + # Warns about taking the address of a range variable. This isn't an issue in + # Go v1.22 and above: https://tip.golang.org/doc/go1.22 + - exportloopref + + # Warns about using magic numbers. We do think it's best to avoid magic + # numbers, but we should not be strict about it. + - mnd linters-settings: errcheck: @@ -18,14 +112,10 @@ linters-settings: # default is false: such cases aren't reported by default. check-blank: false - # [deprecated] comma-separated list of pairs of the form pkg:regex - # the regex is used to ignore names within pkg. (default "fmt:.*"). 
- # see https://github.com/kisielk/errcheck#the-deprecated-method for details - ignore: fmt:.*,io/ioutil:^Read.* - govet: # report about shadowed variables - check-shadowing: false + disable: + - shadow gofmt: # simplify code: gofmt with `-s` option, true by default @@ -41,14 +131,6 @@ linters-settings: - blank - dot - gocyclo: - # minimal code complexity to report, 30 by default (but we recommend 10-20) - min-complexity: 10 - - maligned: - # print struct with more effective memory layout or not, false by default - suggest-new: true - dupl: # tokens count to trigger issue, 150 by default threshold: 100 @@ -68,7 +150,8 @@ linters-settings: # XXX: if you enable this setting, unused will report a lot of false-positives in text editors: # if it's called for subdir of a project it can't find funcs usages. All text editor integrations # with golangci-lint call it on a directory with the changed file. - check-exported: false + exported-is-used: true + exported-fields-are-used: true unparam: # Inspect exported functions, default is false. Set to true if no external program/library imports your code. @@ -106,57 +189,61 @@ linters-settings: nolintlint: require-explanation: true require-specific: true - - -linters: - enable: - - megacheck - - govet - - gocyclo - - gocritic - - goconst - - gci - - gofmt # We enable this as well as goimports for its simplify mode. - - prealloc - - revive - - unconvert - - misspell - - nakedret - - nolintlint - disable: - # These linters are all deprecated as of golangci-lint v1.49.0. We disable - # them explicitly to avoid the linter logging deprecation warnings. 
- - deadcode - - varcheck - - scopelint - - structcheck - - interfacer - - presets: - - bugs - - unused - fast: false - + depguard: + rules: + no_third_party_test_libraries: + list-mode: lax + files: + - $test + deny: + - pkg: github.com/stretchr/testify + desc: "See https://go.dev/wiki/TestComments#assert-libraries" + - pkg: github.com/onsi/ginkgo + desc: "See https://go.dev/wiki/TestComments#assert-libraries" + - pkg: github.com/onsi/gomega + desc: "See https://go.dev/wiki/TestComments#assert-libraries" + + interfacebloat: + max: 5 + + tagliatelle: + case: + rules: + json: goCamel issues: - # Excluding configuration per-path and per-linter + # Excluding generated files. + exclude-files: + - "zz_generated\\..+\\.go$" + - ".+\\.pb.go$" + # Excluding configuration per-path and per-linter. exclude-rules: # Exclude some linters from running on tests files. - path: _test(ing)?\.go linters: - - gocyclo + - gocognit - errcheck - - dupl - gosec - scopelint - unparam + - gochecknoinits + - gochecknoglobals + - containedctx + - forcetypeassert # Ease some gocritic warnings on test files. - path: _test\.go text: "(unnamedResult|exitAfterDefer)" linters: - gocritic + + # It's idiomatic to register Kubernetes types with a package scoped + # SchemeBuilder using an init function. + - path: apis/ + linters: + - gochecknoinits + - gochecknoglobals # These are performance optimisations rather than style issues per se. # They warn when function arguments or range values copy a lot of memory @@ -188,12 +275,26 @@ issues: linters: - gosec - gas + + # This is about implicit memory aliasing in a range loop. + # This is a false positive with Go v1.22 and above. + - text: "G601:" + linters: + - gosec + - gas # Some k8s dependencies do not have JSON tags on all fields in structs. - path: k8s.io/ linters: - musttag + # Various fields related to native patch and transform Composition are + # deprecated, but we can't drop support from Crossplane 1.x. 
We ignore the + # warnings globally instead of suppressing them with comments everywhere. + - text: "SA1019: .+ is deprecated: Use Composition Functions instead." + linters: + - staticcheck + # Independently from option `exclude` we use default exclude patterns, # it can be disabled by this option. To list all # excluded by default patterns execute `golangci-lint run --help`. @@ -209,7 +310,7 @@ issues: new: false # Maximum issues count per one linter. Set to 0 to disable. Default is 50. - max-per-linter: 0 + max-issues-per-linter: 0 # Maximum count of issues with the same text. Set to 0 to disable. Default is 3. max-same-issues: 0 diff --git a/ADOPTERS.md b/ADOPTERS.md index b03687857..928d23fdb 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -15,7 +15,7 @@ maturity, and momentum. Thank you to all adopters and contributors of the Cross To add your organization to this list, you can choose any of the following options: 1. [Open a PR](https://github.com/crossplane/crossplane/pulls) to directly update this list, or - [edit this file](https://github.com/crossplane/crossplane/edit/master/ADOPTERS.md) directly in + [edit this file](https://github.com/crossplane/crossplane/edit/main/ADOPTERS.md) directly in Github 1. Fill out the [adopters form](https://forms.gle/dBQhiyYkYSdzXovN6) 1. Send an email to with your information for the table below @@ -33,7 +33,7 @@ This list is sorted in the order that organizations were added to it. | [RunWhen](https://runwhen.com) | @stewartshea | Builds production and developer environments that power the RunWhen Social Reliability Platform.| | [Nethopper](https://nethopper.io) | @ddonahuex | Main IaC component in Nethopper's [Cloud Management Platform](https://www.nethopper.io/platform). 
Nethopper's Cloud Management Platform combines Crossplane with Continuous Delivery to allow DevOps to create, update, and destroy infrastructure in any cloud.| | [Renault](https://www.renaultgroup.com/) | @smileisak | Building Renault Kubernetes Platform resources using XRDs and compositions for an additional layer of abstraction to provide end-user services. | -| [Gympass](https://gympass.com) | @caiofralmeida | Builds a self-service platform so engineers can be more productive in resource provisioning. | +| [Wellhub (formerly Gympass)](https://wellhub.com) | @caiofralmeida @LCaparelli | Builds a self-service platform so engineers can be more productive in resource provisioning. | | [Deutsche Kreditbank AG](https://www.dkb.de/) | @akesser | At DKB, we have fully integrated Crossplane into our DKB Standard Operating Platform. Starting to move to production in mid-2023, all our clusters now use Crossplane to manage thousands of resources. | | [Akuity](https://akuity.io) | @wanghong230 | Control plane and infrastructure management solution for [Akuity Platform - Managed Argo CD](https://akuity.io/akuity-platform/). Crossplane manages some infrastructure part of dev, staging, and production environments. | | [Neux](https://neux.io) | @styk-tv | In production, running dynamic Crossplane control plane for auto-adjusting kafka/connect/telegraf payload transformations, filtering to/from sources/destinations. | @@ -75,3 +75,20 @@ This list is sorted in the order that organizations were added to it. | [Imagine Learning](https://www.imaginelearning.com/) | [@blakeromano](https://github.com/blakeromano) [blake.romano@imaginelearning.com](mailto:blake.romano@imaginelearning.com) | Control Plane for Infrastructure in Internal Developer Platform | | [babelforce](https://www.babelforce.com/) | [@nik843](https://github.com/nik843) | Orchestrating relational database servers by creating databases, users and permissions for them within all environments. 
| | [Nike](https://nike.com/) | [joel.cooklin@nike.com](mailto:joel.cooklin@nike.com) | Crossplane powers the internal developer platform managing thousands of resources from development to production. | +| [Elastic](https://elastic.co) | [@hwoarang](https://github.com/hwoarang) | We use Crossplane to deploy resources across multiple Cloud Providers for the [Elastic Serverless](https://www.elastic.co/elasticsearch/serverless) products. | +| [DB Systel](https://www.dbsystel.de) | [@gandhiano](https://github.com/gandhiano) | Backbone of the Developer Experience Platform of the [Deutsche Bahn](https://deutschebahn.com). Through Crossplane, application developers can easily provision and integrate a panoply of platform services, creating a coherent platform experience. Cloud infrastructure can also be self-serviced, allowing a 100% Gitops infrastructure-as-code approach. Both the K8s API and a developer portal UI ([Backstage](https://backstage.io)) can be used to interact with the Crossplane compositions.| +| [Akamai](https://www.akamai.com/) | [@nolancon](https://github.com/nolancon) | We use Crossplane to offer customers [provider-linode](https://github.com/linode/provider-linode), a control plane for Akamai Cloud Computing services based on Linode. We have also used Crossplane to develop [provider-ceph](https://github.com/linode/provider-ceph). Provider Ceph is an object storage control plane for Ceph. It is capable of orchestrating up to 200k Managed Resources which represent S3 buckets distributed across multiple Ceph clusters. | +| [Variphy](https://www.variphy.com/) | [info@variphy.com](mailto:info@variphy.com) ([@dmvariphy](https://github.com/dmvariphy) [@nick-variphy](https://github.com/nick-variphy) [@dfalter-variphy](https://github.com/dfalter-variphy) [@zach-variphy](https://github.com/zach-variphy)) | We use Crossplane (via [Upbound Cloud](https://www.upbound.io/)) to manage our development and production infrastructure via GitOps.
Crossplane also allows us to provide custom APIs for production Variphy applications to dynamically manage external resources, such as [Confluent Cloud](https://www.confluent.io/) Kafka topics. | +| [OneUptime](https://oneuptime.com) | @simlarsen | Builds production and developer environments that power the OneUptime Platform. | +| [Xata](https://xata.io) | [@mattfield](https://github.com/mattfield) [@paulaguijarro](https://github.com/paulaguijarro) | Crossplane manages the dev, staging, and production RDS Aurora PostgreSQL clusters for our [Dedicated Clusters](https://xata.io/blog/postgres-dedicated-clusters) offering, along with Flux Kustomizations and other resources that provision cells of internal [Xata](https://xata.io) services. | +| [AlphaSense](https://www.alpha-sense.com/) | @abhihendre | Engineering teams at [AlphaSense](https://www.alpha-sense.com/) leverage Crossplane APIs, abstracted by a set of Helm charts and Compositions curated by our Platform Teams, to seamlessly provision cloud services across three major clouds, including our production environment.| +| [UiPath](https://www.uipath.com/) | [@mjnovice](https://github.com/mjnovice) | Control plane for infrastructure management which powers [AutomationSuite](https://docs.uipath.com/automation-suite/automation-suite/2023.10/installation-guide-eks-aks/automation-suite-on-eksaks-overview) | +| [CloudScript](https://www.cloudscript.com.br/) | @xcloudscript | [CloudScript](https://www.cloudscript.com.br/) engineers have been using Crossplane since 2022 creating customized Compositions for the implementation of our engineering platform, basically automating the creation of Kubernetes environments on AWS, GCP and Azure ( coming soon).| +| [SpareBank 1 Utvikling](https://sparebank1.dev/) | [@chlunde](https://github.com/chlunde) | Crossplane powers our Internal Developer Platform. 
It is utilized for day-to-day operations via GitOps and enabled us to execute a large-scale self-service migration of over a thousand production microservices, databases and caches from on-premises to EKS and managed AWS services. | +| [Veset](https://veset.tv/) | [@pblgomez](https://github.com/pblgomez) | At Veset we are deploying all our backend resources in production environments to be managed by Crossplane. | +| [Hyland Software](https://www.hyland.com/) | [@sethfduke](mailto:seth.duke@hyland.com) | Hyland is utilizing Crossplane in production environments to simplify the infrastructure provisioning process for internal development teams, providing a simple means of creating resources, while maintaining control over compliance, security, and best-practices of these resources through a suite of Compositions. | +| [Skillsoft](https://www.skillsoft.com/) | [@brandon-powers](https://github.com/brandon-powers) | At Skillsoft, Crossplane automates the provisioning and management of our AWS infrastructure (S3, Athena, and Glue) to support core Apache Kafka services powering our online learning platform, [Percipio](https://www.skillsoft.com/meet-skillsoft-percipio), in production environments. | +| [Sopra Steria NO](https://www.soprasteria.no/) | [Eirik Holgernes](mailto:eirik.holgernes@soprasteria.com) | As a consultant agency, [Sopra Steria NO](https://www.soprasteria.no/) is leveraging the benefits of [Crossplane](https://www.crossplane.io/) to create self-service backends to increase speed and agility for the developers and engineers of our customers.
With the power of the compositions and composite resource definitions, the life cycle management of resources in [Kubernetes](https://kubernetes.io/) and deployment using GitOps tools like [Flux](https://fluxcd.io/) or [Argo CD](https://argoproj.github.io/cd/), our customers are taking giant strides into the future! | +| [Zuru Tech Italy](https://zuru.tech/) | [@nello1992](https://github.com/nello1992) | We currently use Crossplane in production environments to deploy workload clusters, with more use cases across the organization to come. | +| [Rogo](https://rogodata.com/) | [@aiell0](https://github.com/aiell0) | We use Crossplane to deploy application-specific infrastructure to multiple cloud providers in our production environments. | +| [Arcfield](https://arcfield.com/) | [@DE-Wizard](https://github.com/DE-Wizard) | Our entire cloud architecture was redesigned from the ground up using [Crossplane](https://www.crossplane.io/) to manage the cloud resources and [Flux](https://fluxcd.io/) to manage feeding [Crossplane](https://www.crossplane.io/) with its configurations. We have architected a Control - Workload cluster configuration that spans multiple regions and providers. The combination of the 2 controllers allowed us to more tightly control environment changes and apply drift correction to mitigate manual configuration changes that may be unauthorized. Our combination covers both dev and production environments with the production environment Master Control Cluster having dominion over both in the end. | diff --git a/CHARTER.md b/CHARTER.md index 77305b82e..f010b53dc 100644 --- a/CHARTER.md +++ b/CHARTER.md @@ -116,4 +116,4 @@ This is a living document. Changes to the scope, principles, or mission statement of the Crossplane project require a [majority vote][sc-voting] of the steering committee. 
-[sc-voting]: https://github.com/crossplane/crossplane/blob/master/GOVERNANCE.md#updating-the-governance +[sc-voting]: https://github.com/crossplane/crossplane/blob/main/GOVERNANCE.md#updating-the-governance diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index a3ee6a8a5..ea1133ab7 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,3 +1,3 @@ ## Community Code of Conduct -This project follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). +This project follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md). diff --git a/Earthfile b/Earthfile new file mode 100644 index 000000000..34a924062 --- /dev/null +++ b/Earthfile @@ -0,0 +1,444 @@ +# See https://docs.earthly.dev/docs/earthfile/features +VERSION --try --raw-output 0.8 + +PROJECT upbound/crossplane + +ARG --global GO_VERSION=1.22.3 + +# reviewable checks that a branch is ready for review. Run it before opening a +# pull request. It will catch a lot of the things our CI workflow will catch. +reviewable: + WAIT + BUILD +generate + END + BUILD +lint + BUILD +test + +# test runs unit tests. +test: + BUILD +go-test + +# lint runs linters. +lint: + BUILD +go-lint + BUILD +helm-lint + +# build builds Crossplane for your native OS and architecture. +build: + ARG USERPLATFORM + BUILD --platform=$USERPLATFORM +go-build + BUILD +image + BUILD +helm-build + +# multiplatform-build builds Crossplane for all supported OS and architectures. +multiplatform-build: + BUILD +go-multiplatform-build + BUILD +multiplatform-image + BUILD +helm-build + +# generate runs code generation. To keep builds fast, it doesn't run as part of +# the build target. It's important to run it explicitly when code needs to be +# generated, for example when you update an API type. +generate: + BUILD +go-modules-tidy + BUILD +go-generate + BUILD +helm-generate + +# e2e runs end-to-end tests. See test/e2e/README.md for details. 
+e2e:
+    ARG FLAGS="-test-suite=base"
+    # Docker installs faster on Alpine, and we only need Go for go tool test2json.
+    FROM golang:${GO_VERSION}-alpine3.20
+    RUN apk add --no-cache docker jq
+    COPY +helm-setup/helm /usr/local/bin/helm
+    COPY +kind-setup/kind /usr/local/bin/kind
+    COPY +gotestsum-setup/gotestsum /usr/local/bin/gotestsum
+    COPY +go-build-e2e/e2e .
+    COPY --dir cluster test .
+    TRY
+        # Using a static CROSSPLANE_VERSION allows Earthly to cache E2E runs as long
+        # as no code changed. If the version contains a git commit (the default) the
+        # build layer cache is invalidated on every commit.
+        WITH DOCKER --load crossplane-e2e/crossplane:latest=(+image --CROSSPLANE_VERSION=v0.0.0-e2e)
+            # TODO(negz): Set GITHUB_ACTIONS=true and use RUN --raw-output when
+            # https://github.com/earthly/earthly/issues/4143 is fixed.
+            RUN gotestsum --no-color=false --format testname --junitfile e2e-tests.xml --raw-command go tool test2json -t -p E2E ./e2e -test.v ${FLAGS}
+        END
+    FINALLY
+        SAVE ARTIFACT --if-exists e2e-tests.xml AS LOCAL _output/tests/e2e-tests.xml
+    END
+
+# hack builds Crossplane, and deploys it to a kind cluster. It runs in your
+# local environment, not a container. The kind cluster will keep running until
+# you run the unhack target. Run hack again to rebuild Crossplane and restart
+# the kind cluster with the new build.
+hack:
+    # TODO(negz): This could run an interactive shell inside a temporary container
+    # once https://github.com/earthly/earthly/issues/3206 is fixed.
+ ARG USERPLATFORM + LOCALLY + WAIT + BUILD +unhack + END + COPY --platform=${USERPLATFORM} +helm-setup/helm .hack/helm + COPY --platform=${USERPLATFORM} +kind-setup/kind .hack/kind + COPY (+helm-build/output --CROSSPLANE_VERSION=v0.0.0-hack) .hack/charts + WITH DOCKER --load crossplane-hack/crossplane:hack=+image + RUN \ + .hack/kind create cluster --name crossplane-hack && \ + .hack/kind load docker-image --name crossplane-hack crossplane-hack/crossplane:hack && \ + .hack/helm install --create-namespace --namespace crossplane-system crossplane .hack/charts/crossplane-0.0.0-hack.tgz \ + --set "image.pullPolicy=Never,image.repository=crossplane-hack/crossplane,image.tag=hack" \ + --set "args={--debug}" + END + RUN docker image rm crossplane-hack/crossplane:hack + RUN rm -rf .hack + +# unhack deletes the kind cluster created by the hack target. +unhack: + ARG USERPLATFORM + LOCALLY + COPY --platform=${USERPLATFORM} +kind-setup/kind .hack/kind + RUN .hack/kind delete cluster --name crossplane-hack + RUN rm -rf .hack + +# go-modules downloads Crossplane's go modules. It's the base target of most Go +# related target (go-build, etc). +go-modules: + ARG NATIVEPLATFORM + FROM --platform=${NATIVEPLATFORM} golang:${GO_VERSION} + WORKDIR /crossplane + CACHE --id go-build --sharing shared /root/.cache/go-build + COPY go.mod go.sum ./ + RUN go mod download + SAVE ARTIFACT go.mod AS LOCAL go.mod + SAVE ARTIFACT go.sum AS LOCAL go.sum + +# go-modules-tidy tidies and verifies go.mod and go.sum. +go-modules-tidy: + FROM +go-modules + CACHE --id go-build --sharing shared /root/.cache/go-build + COPY --dir apis/ cmd/ internal/ pkg/ test/ . + RUN go mod tidy + RUN go mod verify + SAVE ARTIFACT go.mod AS LOCAL go.mod + SAVE ARTIFACT go.sum AS LOCAL go.sum + +# go-generate runs Go code generation. 
+go-generate: + FROM +go-modules + CACHE --id go-build --sharing shared /root/.cache/go-build + COPY +kubectl-setup/kubectl /usr/local/bin/kubectl + COPY --dir cluster/crd-patches cluster/crd-patches + COPY --dir hack/ apis/ internal/ . + RUN go generate -tags 'generate' ./apis/... + # TODO(negz): Can this move into generate.go? Ideally it would live there with + # the code that actually generates the CRDs, but it depends on kubectl. + RUN kubectl patch --local --type=json \ + --patch-file cluster/crd-patches/pkg.crossplane.io_deploymentruntimeconfigs.yaml \ + --filename cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml \ + --output=yaml > /tmp/patched.yaml \ + && mv /tmp/patched.yaml cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml + SAVE ARTIFACT apis/ AS LOCAL apis + SAVE ARTIFACT cluster/crds AS LOCAL cluster/crds + +# go-build builds Crossplane binaries for your native OS and architecture. +go-build: + ARG EARTHLY_GIT_SHORT_HASH + ARG EARTHLY_GIT_COMMIT_TIMESTAMP + ARG CROSSPLANE_VERSION=v0.0.0-${EARTHLY_GIT_COMMIT_TIMESTAMP}-${EARTHLY_GIT_SHORT_HASH} + ARG TARGETARCH + ARG TARGETOS + ARG GOARCH=${TARGETARCH} + ARG GOOS=${TARGETOS} + ARG GOFLAGS="-ldflags=-X=github.com/crossplane/crossplane/internal/version.version=${CROSSPLANE_VERSION}" + ARG CGO_ENABLED=0 + FROM +go-modules + LET ext = "" + IF [ "$GOOS" = "windows" ] + SET ext = ".exe" + END + CACHE --id go-build --sharing shared /root/.cache/go-build + COPY --dir apis/ cmd/ internal/ pkg/ . + RUN go build -o crossplane${ext} ./cmd/crossplane + RUN go build -o crank${ext} ./cmd/crank + SAVE ARTIFACT crossplane${ext} AS LOCAL _output/bin/${GOOS}_${GOARCH}/crossplane${ext} + SAVE ARTIFACT crank${ext} AS LOCAL _output/bin/${GOOS}_${GOARCH}/crank${ext} + +# go-multiplatform-build builds Crossplane binaries for all supported OS +# and architectures. 
+go-multiplatform-build: + BUILD \ + --platform=linux/amd64 \ + --platform=linux/arm64 \ + --platform=linux/arm \ + --platform=linux/ppc64le \ + --platform=darwin/arm64 \ + --platform=darwin/amd64 \ + --platform=windows/amd64 \ + +go-build + +# go-build-e2e builds Crossplane's end-to-end tests. +go-build-e2e: + ARG CGO_ENABLED=0 + FROM +go-modules + CACHE --id go-build --sharing shared /root/.cache/go-build + COPY --dir apis/ internal/ test/ . + RUN go test -c -o e2e ./test/e2e + SAVE ARTIFACT e2e + +# go-test runs Go unit tests. +go-test: + FROM +go-modules + CACHE --id go-build --sharing shared /root/.cache/go-build + COPY --dir apis/ cmd/ internal/ pkg/ . + RUN go test -covermode=count -coverprofile=coverage.txt ./... + SAVE ARTIFACT coverage.txt AS LOCAL _output/tests/coverage.txt + +# go-lint lints Go code. +go-lint: + ARG GOLANGCI_LINT_VERSION=v1.59.0 + FROM +go-modules + # This cache is private because golangci-lint doesn't support concurrent runs. + CACHE --id go-lint --sharing private /root/.cache/golangci-lint + CACHE --id go-build --sharing shared /root/.cache/go-build + RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin ${GOLANGCI_LINT_VERSION} + COPY .golangci.yml . + COPY --dir apis/ cmd/ internal/ pkg/ test/ . + RUN golangci-lint run --fix + SAVE ARTIFACT apis AS LOCAL apis + SAVE ARTIFACT cmd AS LOCAL cmd + SAVE ARTIFACT internal AS LOCAL internal + SAVE ARTIFACT pkg AS LOCAL pkg + SAVE ARTIFACT test AS LOCAL test + +# image builds the Crossplane OCI image for your native architecture. 
+image: + ARG EARTHLY_GIT_BRANCH + ARG EARTHLY_GIT_SHORT_HASH + ARG EARTHLY_GIT_COMMIT_TIMESTAMP + ARG CROSSPLANE_REPO=build-${EARTHLY_GIT_SHORT_HASH}/crossplane + ARG CROSSPLANE_VERSION=v0.0.0-${EARTHLY_GIT_COMMIT_TIMESTAMP}-${EARTHLY_GIT_SHORT_HASH} + ARG NATIVEPLATFORM + ARG TARGETPLATFORM + ARG TARGETARCH + ARG TARGETOS + FROM --platform=${TARGETPLATFORM} gcr.io/distroless/static@sha256:41972110a1c1a5c0b6adb283e8aa092c43c31f7c5d79b8656fbffff2c3e61f05 + COPY --platform=${NATIVEPLATFORM} (+go-build/crossplane --GOOS=${TARGETOS} --GOARCH=${TARGETARCH}) /usr/local/bin/ + COPY --dir cluster/crds/ /crds + COPY --dir cluster/webhookconfigurations/ /webhookconfigurations + EXPOSE 8080 + USER 65532 + ENTRYPOINT ["crossplane"] + SAVE IMAGE --push ${CROSSPLANE_REPO}:${CROSSPLANE_VERSION} + SAVE IMAGE --push ${CROSSPLANE_REPO}:${EARTHLY_GIT_BRANCH} + +# multiplatform-image builds the Crossplane OCI image for all supported +# architectures. +multiplatform-image: + BUILD \ + --platform=linux/amd64 \ + --platform=linux/arm64 \ + --platform=linux/arm \ + --platform=linux/ppc64le \ + +image + +# helm-lint lints the Crossplane Helm chart. +helm-lint: + FROM alpine:3.20 + WORKDIR /chart + COPY +helm-setup/helm /usr/local/bin/helm + COPY cluster/charts/crossplane/ . + RUN --entrypoint helm lint + +# helm-generate runs Helm code generation - specifically helm-docs. +helm-generate: + FROM alpine:3.20 + WORKDIR /chart + COPY +helm-docs-setup/helm-docs /usr/local/bin/helm-docs + COPY cluster/charts/crossplane/ . + RUN helm-docs + SAVE ARTIFACT . AS LOCAL cluster/charts/crossplane + +# helm-build packages the Crossplane Helm chart. +helm-build: + ARG EARTHLY_GIT_SHORT_HASH + ARG EARTHLY_GIT_COMMIT_TIMESTAMP + ARG CROSSPLANE_VERSION=v0.0.0-${EARTHLY_GIT_COMMIT_TIMESTAMP}-${EARTHLY_GIT_SHORT_HASH} + FROM alpine:3.20 + WORKDIR /chart + COPY +helm-setup/helm /usr/local/bin/helm + COPY cluster/charts/crossplane/ . + # We strip the leading v from Helm chart versions. 
+ LET CROSSPLANE_CHART_VERSION=$(echo ${CROSSPLANE_VERSION}|sed -e 's/^v//') + RUN helm dependency update + RUN helm package --version ${CROSSPLANE_CHART_VERSION} --app-version ${CROSSPLANE_CHART_VERSION} -d output . + SAVE ARTIFACT output AS LOCAL _output/charts + +# kubectl-setup is used by other targets to setup kubectl. +kubectl-setup: + ARG KUBECTL_VERSION=v1.30.1 + ARG NATIVEPLATFORM + ARG TARGETOS + ARG TARGETARCH + FROM --platform=${NATIVEPLATFORM} curlimages/curl:8.8.0 + RUN curl -fsSL https://dl.k8s.io/${KUBECTL_VERSION}/kubernetes-client-${TARGETOS}-${TARGETARCH}.tar.gz|tar zx + SAVE ARTIFACT kubernetes/client/bin/kubectl + +# kind-setup is used by other targets to setup kind. +kind-setup: + ARG KIND_VERSION=v0.23.0 + ARG NATIVEPLATFORM + ARG TARGETOS + ARG TARGETARCH + FROM --platform=${NATIVEPLATFORM} curlimages/curl:8.8.0 + RUN curl -fsSLo kind https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-${TARGETOS}-${TARGETARCH}&&chmod +x kind + SAVE ARTIFACT kind + +# gotestsum-setup is used by other targets to setup gotestsum. +gotestsum-setup: + ARG GOTESTSUM_VERSION=1.12.0 + ARG NATIVEPLATFORM + ARG TARGETOS + ARG TARGETARCH + FROM --platform=${NATIVEPLATFORM} curlimages/curl:8.8.0 + RUN curl -fsSL https://github.com/gotestyourself/gotestsum/releases/download/v${GOTESTSUM_VERSION}/gotestsum_${GOTESTSUM_VERSION}_${TARGETOS}_${TARGETARCH}.tar.gz|tar zx>gotestsum + SAVE ARTIFACT gotestsum + +# helm-docs-setup is used by other targets to setup helm-docs. 
+helm-docs-setup:
+    ARG HELM_DOCS_VERSION=1.13.1
+    ARG NATIVEPLATFORM
+    ARG TARGETOS
+    ARG TARGETARCH
+    FROM --platform=${NATIVEPLATFORM} curlimages/curl:8.8.0
+    IF [ "${TARGETARCH}" = "amd64" ]
+        LET ARCH=x86_64
+    ELSE
+        LET ARCH=${TARGETARCH}
+    END
+    RUN curl -fsSL https://github.com/norwoodj/helm-docs/releases/download/v${HELM_DOCS_VERSION}/helm-docs_${HELM_DOCS_VERSION}_${TARGETOS}_${ARCH}.tar.gz|tar zx>helm-docs
+    SAVE ARTIFACT helm-docs
+
+# helm-setup is used by other targets to setup helm.
+helm-setup:
+    ARG HELM_VERSION=v3.15.1
+    ARG NATIVEPLATFORM
+    ARG TARGETOS
+    ARG TARGETARCH
+    FROM --platform=${NATIVEPLATFORM} curlimages/curl:8.8.0
+    RUN curl -fsSL https://get.helm.sh/helm-${HELM_VERSION}-${TARGETOS}-${TARGETARCH}.tar.gz|tar zx --strip-components=1
+    SAVE ARTIFACT helm
+
+# Targets below this point are intended only for use in GitHub Actions CI. They
+# may not work outside of that environment. For example they may depend on
+# secrets that are only available in the CI environment. Targets below this point
+# must be prefixed with ci-.
+
+# TODO(negz): Is there a better way to determine the Crossplane version?
+# This versioning approach maintains compatibility with the build submodule. See
+# https://github.com/crossplane/build/blob/231258/makelib/common.mk#L205. This
+# approach is problematic in Earthly because computing it inside a containerized
+# target requires copying the entire git repository into the container. Doing so
+# would invalidate all dependent target caches any time any file in git changed.
+
+# ci-version is used by CI to set the CROSSPLANE_VERSION environment variable.
+ci-version:
+    LOCALLY
+    RUN echo "CROSSPLANE_VERSION=$(git describe --dirty --always --tags|sed -e 's/-/./2g'|sed 's/[\.,-]up.*//')" > $GITHUB_ENV
+
+# ci-artifacts is used by CI to build and push the Crossplane image, chart, and
+# binaries.
+ci-artifacts: + BUILD +multiplatform-build \ + --CROSSPLANE_REPO=index.docker.io/upbound/crossplane \ + --CROSSPLANE_REPO=xpkg.upbound.io/upbound/crossplane + +# ci-codeql-setup sets up CodeQL for the ci-codeql target. +ci-codeql-setup: + ARG CODEQL_VERSION=v2.17.3 + FROM curlimages/curl:8.8.0 + RUN curl -fsSL https://github.com/github/codeql-action/releases/download/codeql-bundle-${CODEQL_VERSION}/codeql-bundle-linux64.tar.gz|tar zx + SAVE ARTIFACT codeql + +# ci-codeql is used by CI to build Crossplane with CodeQL scanning enabled. +ci-codeql: + ARG CGO_ENABLED=0 + ARG TARGETOS + ARG TARGETARCH + # Using a static CROSSPLANE_VERSION allows Earthly to cache E2E runs as long + # as no code changed. If the version contains a git commit (the default) the + # build layer cache is invalidated on every commit. + FROM +go-modules --CROSSPLANE_VERSION=v0.0.0-codeql + IF [ "${TARGETARCH}" = "arm64" ] && [ "${TARGETOS}" = "linux" ] + RUN --no-cache echo "CodeQL doesn't support Linux on Apple Silicon" && false + END + COPY --dir +ci-codeql-setup/codeql /codeql + CACHE --id go-build --sharing shared /root/.cache/go-build + COPY --dir apis/ cmd/ internal/ pkg/ . + RUN /codeql/codeql database create /codeqldb --language=go + RUN /codeql/codeql database analyze /codeqldb --threads=0 --format=sarif-latest --output=go.sarif --sarif-add-baseline-file-info + SAVE ARTIFACT go.sarif AS LOCAL _output/codeql/go.sarif + +# ci-promote-image is used by CI to promote a Crossplane image to a channel. +# In practice, this means creating a new channel tag (e.g. master or stable) +# that points to the supplied version. +ci-promote-image: + ARG --required CROSSPLANE_REPO + ARG --required CROSSPLANE_VERSION + ARG --required CHANNEL + FROM alpine:3.20 + RUN apk add docker + # We need to omit the registry argument when we're logging into Docker Hub. + # Otherwise login will appear to succeed, but buildx will fail on auth. 
+ IF [[ "${CROSSPLANE_REPO}" == *docker.io/* ]] + RUN --secret DOCKER_USER --secret DOCKER_PASSWORD docker login -u ${DOCKER_USER} -p ${DOCKER_PASSWORD} + ELSE + RUN --secret DOCKER_USER --secret DOCKER_PASSWORD docker login -u ${DOCKER_USER} -p ${DOCKER_PASSWORD} ${CROSSPLANE_REPO} + END + RUN --push docker buildx imagetools create \ + --tag ${CROSSPLANE_REPO}:${CHANNEL} \ + --tag ${CROSSPLANE_REPO}:${CROSSPLANE_VERSION}-${CHANNEL} \ + ${CROSSPLANE_REPO}:${CROSSPLANE_VERSION} + +# TODO(negz): Ideally ci-push-build-artifacts would be merged into ci-artifacts, +# i.e. just build and push them all in the same target. Currently we're relying +# on the fact that ci-artifacts does a bunch of SAVE ARTIFACT AS LOCAL, which +# ci-push-build-artifacts then loads. That's an anti-pattern in Earthly. We're +# supposed to use COPY instead, but I'm not sure how to COPY artifacts from a +# matrix build. + +# ci-push-build-artifacts is used by CI to push binary artifacts to S3. +ci-push-build-artifacts: + ARG --required CROSSPLANE_VERSION + ARG --required BUILD_DIR + ARG ARTIFACTS_DIR=_output + ARG BUCKET_RELEASES=crossplane.releases + ARG AWS_DEFAULT_REGION + FROM amazon/aws-cli:2.15.61 + COPY --dir ${ARTIFACTS_DIR} artifacts + RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --delete --only-show-errors artifacts s3://${BUCKET_RELEASES}/build/${BUILD_DIR}/${CROSSPLANE_VERSION} + +# ci-promote-build-artifacts is used by CI to promote binary artifacts and Helm +# charts to a channel. In practice, this means copying them from one S3 +# directory to another. 
+ci-promote-build-artifacts: + ARG --required CROSSPLANE_VERSION + ARG --required BUILD_DIR + ARG --required CHANNEL + ARG HELM_REPO_URL=https://charts.crossplane.io + ARG BUCKET_RELEASES=crossplane.releases + ARG BUCKET_CHARTS=crossplane.charts + ARG PRERELEASE=false + ARG AWS_DEFAULT_REGION + FROM amazon/aws-cli:2.15.61 + COPY +helm-setup/helm /usr/local/bin/helm + RUN --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --only-show-errors s3://${BUCKET_CHARTS}/${CHANNEL} repo + RUN --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --only-show-errors s3://${BUCKET_RELEASES}/build/${BUILD_DIR}/${CROSSPLANE_VERSION}/charts repo + RUN helm repo index --url ${HELM_REPO_URL}/${CHANNEL} repo + RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --delete --only-show-errors repo s3://${BUCKET_CHARTS}/${CHANNEL} + RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 cp --only-show-errors --cache-control "private, max-age=0, no-transform" repo/index.yaml s3://${BUCKET_CHARTS}/${CHANNEL}/index.yaml + RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --delete --only-show-errors s3://${BUCKET_RELEASES}/build/${BUILD_DIR}/${CROSSPLANE_VERSION} s3://${BUCKET_RELEASES}/${CHANNEL}/${CROSSPLANE_VERSION} + IF [ "${PRERELEASE}" = "false" ] + RUN --push --secret=AWS_ACCESS_KEY_ID --secret=AWS_SECRET_ACCESS_KEY aws s3 sync --delete --only-show-errors s3://${BUCKET_RELEASES}/build/${BUILD_DIR}/${CROSSPLANE_VERSION} s3://${BUCKET_RELEASES}/${CHANNEL}/current + END diff --git a/GOVERNANCE.md b/GOVERNANCE.md index 541edeb33..9e1dd854d 100644 --- a/GOVERNANCE.md +++ b/GOVERNANCE.md @@ -67,11 +67,22 @@ Here are the members of the initial steering committee (listed in alphabetical o |   | Member | Organization | Email | Term Start | Term End | 
|-----------------------------------------------------------|------------------------------------------------|--------------|-----------------------------|------------|------------| | | [Bassam Tabbara](https://github.com/bassam) | Upbound | bassam@upbound.io | 2024-02-06 | 2026-02-06 | -| | [Brian Lindblom](https://github.com/brlindblom)| Apple | blindblom@apple.com | 2024-02-06 | 2025-02-07 | +| | [Brian Lindblom](https://github.com/lindblombr)| Apple | blindblom@apple.com | 2024-02-06 | 2025-02-07 | | | [Bob Haddleton](https://github.com/bobh66) | Nokia | bob.haddleton@nokia.com | 2024-02-06 | 2025-02-07 | | | [Jared Watts](https://github.com/jbw976) | Upbound | jared@upbound.io | 2024-02-06 | 2026-02-06 | | | [Nic Cope](https://github.com/negz) | Upbound | negz@upbound.io | 2024-02-06 | 2026-02-06 | +### Contact Info + +The steering committee can be reached at the following locations: + +* [`#steering-committee`](https://crossplane.slack.com/archives/C032WMA459S) + channel on the [Crossplane Slack](https://slack.crossplane.io/) workspace +* [`steering@crossplane.io`](mailto:steering@crossplane.io) public email address + +Members of the community as well as the broader ecosystem are welcome to contact +the steering committee for any issues or concerns they can assist with. + ### Election Process #### Eligibility for Voting diff --git a/Makefile b/Makefile deleted file mode 100644 index 6542715e3..000000000 --- a/Makefile +++ /dev/null @@ -1,185 +0,0 @@ -# ==================================================================================== -# Setup Project - -PROJECT_NAME := crossplane -PROJECT_REPO := github.com/crossplane/$(PROJECT_NAME) - -PLATFORMS ?= linux_amd64 linux_arm64 linux_arm linux_ppc64le darwin_amd64 darwin_arm64 windows_amd64 -# -include will silently skip missing files, which allows us -# to load those files with a target in the Makefile. 
If only -# "include" was used, the make command would fail and refuse -# to run a target until the include commands succeeded. --include build/makelib/common.mk - -# ==================================================================================== -# Setup Output - --include build/makelib/output.mk - -# ==================================================================================== -# Setup Go - -# Set a sane default so that the nprocs calculation below is less noisy on the initial -# loading of this file -NPROCS ?= 1 - -# each of our test suites starts a kube-apiserver and running many test suites in -# parallel can lead to high CPU utilization. by default we reduce the parallelism -# to half the number of CPU cores. -GO_TEST_PARALLEL := $(shell echo $$(( $(NPROCS) / 2 ))) - -GO_STATIC_PACKAGES = $(GO_PROJECT)/cmd/crossplane $(GO_PROJECT)/cmd/crank -GO_TEST_PACKAGES = $(GO_PROJECT)/test/e2e -GO_LDFLAGS += -X $(GO_PROJECT)/internal/version.version=$(shell echo $(VERSION) | sed 's/[\.,-]up.*//' ) -GO_SUBDIRS += cmd internal apis -GO111MODULE = on -GOLANGCILINT_VERSION = 1.55.2 -GO_LINT_ARGS ?= "--fix" - --include build/makelib/golang.mk - -# ==================================================================================== -# Setup Kubernetes tools - -USE_HELM3 = true -HELM3_VERSION = v3.14.0 -KIND_VERSION = v0.21.0 --include build/makelib/k8s_tools.mk - -# ==================================================================================== -# Setup Images -# Due to the way that the shared build logic works, images should -# all be in folders at the same level (no additional levels of nesting). - -REGISTRY_ORGS ?= docker.io/upbound xpkg.upbound.io/upbound -IMAGES = crossplane --include build/makelib/imagelight.mk - -# ==================================================================================== -# Targets - -# run `make help` to see the targets and options - -# We want submodules to be set up the first time `make` is run. 
-# We manage the build/ folder and its Makefiles as a submodule. -# The first time `make` is run, the includes of build/*.mk files will -# all fail, and this target will be run. The next time, the default as defined -# by the includes will be run instead. -fallthrough: submodules - @echo Initial setup complete. Running make again . . . - @make - -manifests: - @$(WARN) Deprecated. Please run make generate instead. - -CRD_DIR = cluster/crds - -crds.clean: - @$(INFO) cleaning generated CRDs - @find $(CRD_DIR) -name '*.yaml' -exec sed -i.sed -e '1,1d' {} \; || $(FAIL) - @find $(CRD_DIR) -name '*.yaml.sed' -delete || $(FAIL) - @$(OK) cleaned generated CRDs - -generate.run: gen-kustomize-crds gen-chart-license - -gen-chart-license: - @cp -f LICENSE cluster/charts/crossplane/LICENSE - -generate.done: crds.clean - -gen-kustomize-crds: - @$(INFO) Adding all CRDs to Kustomize file for local development - @rm cluster/kustomization.yaml - @echo "# This kustomization can be used to remotely install all Crossplane CRDs" >> cluster/kustomization.yaml - @echo "# by running kubectl apply -k https://github.com/crossplane/crossplane//cluster?ref=master" >> cluster/kustomization.yaml - @echo "resources:" >> cluster/kustomization.yaml - @find $(CRD_DIR) -type f -name '*.yaml' | sort | \ - while read filename ;\ - do echo "- $${filename#*/}" >> cluster/kustomization.yaml \ - ; done - @$(OK) All CRDs added to Kustomize file for local development - -# Generate a coverage report for cobertura applying exclusions on -# - generated file -cobertura: - @cat $(GO_TEST_OUTPUT)/coverage.txt | \ - grep -v zz_generated.deepcopy | \ - $(GOCOVER_COBERTURA) > $(GO_TEST_OUTPUT)/cobertura-coverage.xml - -e2e-tag-images: - @$(INFO) Tagging E2E test images - @docker tag $(BUILD_REGISTRY)/$(PROJECT_NAME)-$(TARGETARCH) crossplane-e2e/$(PROJECT_NAME):latest || $(FAIL) - @$(OK) Tagged E2E test images - -# NOTE(negz): There's already a go.test.integration target, but it's weird. 
-# This relies on make build building the e2e binary. -E2E_TEST_FLAGS ?= - -# TODO(negz): Ideally we'd just tell the E2E tests which CLI tools to invoke. -# https://github.com/kubernetes-sigs/e2e-framework/issues/282 -E2E_PATH = $(WORK_DIR)/e2e - -e2e-run-tests: - @$(INFO) Run E2E tests - @mkdir -p $(E2E_PATH) - @ln -sf $(KIND) $(E2E_PATH)/kind - @ln -sf $(HELM) $(E2E_PATH)/helm - @PATH="$(E2E_PATH):${PATH}" $(GO_TEST_OUTPUT)/e2e $(E2E_TEST_FLAGS) || $(FAIL) - @$(OK) Run E2E tests - -e2e.init: build e2e-tag-images - -e2e.run: $(KIND) $(HELM3) e2e-run-tests - -# Update the submodules, such as the common build scripts. -submodules: - @git submodule sync - @git submodule update --init --recursive - -# Install CRDs into a cluster. This is for convenience. -install-crds: $(KUBECTL) reviewable - $(KUBECTL) apply -f $(CRD_DIR) - -# Uninstall CRDs from a cluster. This is for convenience. -uninstall-crds: - $(KUBECTL) delete -f $(CRD_DIR) - -# NOTE(hasheddan): the build submodule currently overrides XDG_CACHE_HOME in -# order to force the Helm 3 to use the .work/helm directory. This causes Go on -# Linux machines to use that directory as the build cache as well. We should -# adjust this behavior in the build submodule because it is also causing Linux -# users to duplicate their build cache, but for now we just make it easier to -# identify its location in CI so that we cache between builds. -go.cachedir: - @go env GOCACHE - -# This is for running out-of-cluster locally, and is for convenience. Running -# this make target will print out the command which was used. For more control, -# try running the binary directly with different arguments. -run: go.build - @$(INFO) Running Crossplane locally out-of-cluster . . . 
- @# To see other arguments that can be provided, run the command with --help instead - $(GO_OUT_DIR)/$(PROJECT_NAME) core start --debug - -.PHONY: manifests cobertura submodules fallthrough test-integration run install-crds uninstall-crds gen-kustomize-crds e2e-tests-compile e2e.test.images - -# ==================================================================================== -# Special Targets - -define CROSSPLANE_MAKE_HELP -Crossplane Targets: - cobertura Generate a coverage report for cobertura applying exclusions on generated files. - submodules Update the submodules, such as the common build scripts. - run Run crossplane locally, out-of-cluster. Useful for development. - -endef -# The reason CROSSPLANE_MAKE_HELP is used instead of CROSSPLANE_HELP is because the crossplane -# binary will try to use CROSSPLANE_HELP if it is set, and this is for something different. -export CROSSPLANE_MAKE_HELP - -crossplane.help: - @echo "$$CROSSPLANE_MAKE_HELP" - -help-special: crossplane.help - -.PHONY: crossplane.help help-special diff --git a/ROADMAP.md b/ROADMAP.md index 2b539b4b9..737805b58 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -4,6 +4,16 @@ The public roadmap for Crossplane is published as a GitHub project board. Issues added to the roadmap have been triaged and identified as valuable to the community, and therefore a priority for the project that we expect to invest in. +The maintainer team regularly triages requests from the community to identify +features and issues of suitable scope and impact to include in this roadmap. The +community is encouraged to show their support for potential roadmap issues by +adding a :+1: reaction, leaving descriptive comments, and attending the +[regular community meetings] to discuss their requirements and use cases. + +The maintainer team updates the roadmap on an as needed basis, in response to +demand, priority, and available resources. The public roadmap can be updated at +any time. 
+ Milestones assigned to any issues in the roadmap are intended to give a sense of overall priority and the expected order of delivery. They should be considered approximate estimations and are **not** a strict commitment to a specific @@ -11,4 +21,5 @@ delivery timeline. [Crossplane Roadmap] -[Crossplane Roadmap]: https://github.com/orgs/crossplane/projects/20/views/3?pane=info +[Crossplane Roadmap]: https://github.com/orgs/crossplane/projects/20/views/9?pane=info +[regular community meetings]: https://github.com/crossplane/crossplane/blob/main/README.md#get-involved diff --git a/SECURITY.md b/SECURITY.md index 3265e9322..5b3504a42 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -38,7 +38,7 @@ To report a vulnerability, either: The reporter(s) can typically expect a response within 24 hours acknowledging the issue was received. If a response is not received within 24 hours, please reach out to any -[maintainer](https://github.com/crossplane/crossplane/blob/master/OWNERS.md#maintainers) +[maintainer](https://github.com/crossplane/crossplane/blob/main/OWNERS.md#maintainers) directly to confirm receipt of the issue. ### Report Content @@ -85,7 +85,7 @@ and mentioned in the fixed versions' release notes. ## Supported Versions -See [Crossplane's documentation](https://docs.crossplane.io/master/reference/release-cycle/) +See [Crossplane's documentation](https://docs.crossplane.io/latest/learn/release-cycle/) for information on supported versions of crossplane. Any supported release branch may receive security updates. 
For any security issues discovered on older versions, non-core packages, or dependencies, please inform maintainers diff --git a/apis/apiextensions/apiextensions.go b/apis/apiextensions/apiextensions.go index c4ee174cf..9d71f0494 100644 --- a/apis/apiextensions/apiextensions.go +++ b/apis/apiextensions/apiextensions.go @@ -34,10 +34,10 @@ func init() { ) } -// AddToSchemes may be used to add all resources defined in the project to a Scheme +// AddToSchemes may be used to add all resources defined in the project to a Scheme. var AddToSchemes runtime.SchemeBuilder -// AddToScheme adds all Resources to the Scheme +// AddToScheme adds all Resources to the Scheme. func AddToScheme(s *runtime.Scheme) error { return AddToSchemes.AddToScheme(s) } diff --git a/apis/apiextensions/fn/proto/v1/run_function.pb.go b/apis/apiextensions/fn/proto/v1/run_function.pb.go new file mode 100644 index 000000000..f7080304c --- /dev/null +++ b/apis/apiextensions/fn/proto/v1/run_function.pb.go @@ -0,0 +1,1841 @@ +// +//Copyright 2022 The Crossplane Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.34.2 +// protoc (unknown) +// source: apiextensions/fn/proto/v1/run_function.proto + +package v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Ready indicates whether a composed resource should be considered ready. +type Ready int32 + +const ( + Ready_READY_UNSPECIFIED Ready = 0 + // True means the composed resource has been observed to be ready. + Ready_READY_TRUE Ready = 1 + // False means the composed resource has not been observed to be ready. + Ready_READY_FALSE Ready = 2 +) + +// Enum value maps for Ready. +var ( + Ready_name = map[int32]string{ + 0: "READY_UNSPECIFIED", + 1: "READY_TRUE", + 2: "READY_FALSE", + } + Ready_value = map[string]int32{ + "READY_UNSPECIFIED": 0, + "READY_TRUE": 1, + "READY_FALSE": 2, + } +) + +func (x Ready) Enum() *Ready { + p := new(Ready) + *p = x + return p +} + +func (x Ready) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Ready) Descriptor() protoreflect.EnumDescriptor { + return file_apiextensions_fn_proto_v1_run_function_proto_enumTypes[0].Descriptor() +} + +func (Ready) Type() protoreflect.EnumType { + return &file_apiextensions_fn_proto_v1_run_function_proto_enumTypes[0] +} + +func (x Ready) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Ready.Descriptor instead. 
+func (Ready) EnumDescriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{0} +} + +// Severity of Function results. +type Severity int32 + +const ( + Severity_SEVERITY_UNSPECIFIED Severity = 0 + // Fatal results are fatal; subsequent Composition Functions may run, but + // the Composition Function pipeline run will be considered a failure and + // the first fatal result will be returned as an error. + Severity_SEVERITY_FATAL Severity = 1 + // Warning results are non-fatal; the entire Composition will run to + // completion but warning events and debug logs associated with the + // composite resource will be emitted. + Severity_SEVERITY_WARNING Severity = 2 + // Normal results are emitted as normal events and debug logs associated + // with the composite resource. + Severity_SEVERITY_NORMAL Severity = 3 +) + +// Enum value maps for Severity. +var ( + Severity_name = map[int32]string{ + 0: "SEVERITY_UNSPECIFIED", + 1: "SEVERITY_FATAL", + 2: "SEVERITY_WARNING", + 3: "SEVERITY_NORMAL", + } + Severity_value = map[string]int32{ + "SEVERITY_UNSPECIFIED": 0, + "SEVERITY_FATAL": 1, + "SEVERITY_WARNING": 2, + "SEVERITY_NORMAL": 3, + } +) + +func (x Severity) Enum() *Severity { + p := new(Severity) + *p = x + return p +} + +func (x Severity) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Severity) Descriptor() protoreflect.EnumDescriptor { + return file_apiextensions_fn_proto_v1_run_function_proto_enumTypes[1].Descriptor() +} + +func (Severity) Type() protoreflect.EnumType { + return &file_apiextensions_fn_proto_v1_run_function_proto_enumTypes[1] +} + +func (x Severity) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Severity.Descriptor instead. 
+func (Severity) EnumDescriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{1} +} + +// Target of Function results and conditions. +type Target int32 + +const ( + // If the target is unspecified, the result targets the composite resource. + Target_TARGET_UNSPECIFIED Target = 0 + // Target the composite resource. Results that target the composite resource + // should include detailed, advanced information. + Target_TARGET_COMPOSITE Target = 1 + // Target the composite and the claim. Results that target the composite and + // the claim should include only end-user friendly information. + Target_TARGET_COMPOSITE_AND_CLAIM Target = 2 +) + +// Enum value maps for Target. +var ( + Target_name = map[int32]string{ + 0: "TARGET_UNSPECIFIED", + 1: "TARGET_COMPOSITE", + 2: "TARGET_COMPOSITE_AND_CLAIM", + } + Target_value = map[string]int32{ + "TARGET_UNSPECIFIED": 0, + "TARGET_COMPOSITE": 1, + "TARGET_COMPOSITE_AND_CLAIM": 2, + } +) + +func (x Target) Enum() *Target { + p := new(Target) + *p = x + return p +} + +func (x Target) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Target) Descriptor() protoreflect.EnumDescriptor { + return file_apiextensions_fn_proto_v1_run_function_proto_enumTypes[2].Descriptor() +} + +func (Target) Type() protoreflect.EnumType { + return &file_apiextensions_fn_proto_v1_run_function_proto_enumTypes[2] +} + +func (x Target) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Target.Descriptor instead. +func (Target) EnumDescriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{2} +} + +type Status int32 + +const ( + Status_STATUS_CONDITION_UNSPECIFIED Status = 0 + Status_STATUS_CONDITION_UNKNOWN Status = 1 + Status_STATUS_CONDITION_TRUE Status = 2 + Status_STATUS_CONDITION_FALSE Status = 3 +) + +// Enum value maps for Status. 
+var ( + Status_name = map[int32]string{ + 0: "STATUS_CONDITION_UNSPECIFIED", + 1: "STATUS_CONDITION_UNKNOWN", + 2: "STATUS_CONDITION_TRUE", + 3: "STATUS_CONDITION_FALSE", + } + Status_value = map[string]int32{ + "STATUS_CONDITION_UNSPECIFIED": 0, + "STATUS_CONDITION_UNKNOWN": 1, + "STATUS_CONDITION_TRUE": 2, + "STATUS_CONDITION_FALSE": 3, + } +) + +func (x Status) Enum() *Status { + p := new(Status) + *p = x + return p +} + +func (x Status) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Status) Descriptor() protoreflect.EnumDescriptor { + return file_apiextensions_fn_proto_v1_run_function_proto_enumTypes[3].Descriptor() +} + +func (Status) Type() protoreflect.EnumType { + return &file_apiextensions_fn_proto_v1_run_function_proto_enumTypes[3] +} + +func (x Status) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Status.Descriptor instead. +func (Status) EnumDescriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{3} +} + +// A RunFunctionRequest requests that the Composition Function be run. +type RunFunctionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Metadata pertaining to this request. + Meta *RequestMeta `protobuf:"bytes,1,opt,name=meta,proto3" json:"meta,omitempty"` + // The observed state prior to invocation of a Function pipeline. State passed + // to each Function is fresh as of the time the pipeline was invoked, not as + // of the time each Function was invoked. + Observed *State `protobuf:"bytes,2,opt,name=observed,proto3" json:"observed,omitempty"` + // Desired state according to a Function pipeline. The state passed to a + // particular Function may have been accumulated by previous Functions in the + // pipeline. 
+ // + // Note that the desired state must be a partial object with only the fields + // that this function (and its predecessors in the pipeline) wants to have + // set in the object. Copying a non-partial observed state to desired is most + // likely not what you want to do. Leaving out fields that had been returned + // as desired before will result in them being deleted from the objects in the + // cluster. + Desired *State `protobuf:"bytes,3,opt,name=desired,proto3" json:"desired,omitempty"` + // Optional input specific to this Function invocation. A JSON representation + // of the 'input' block of the relevant entry in a Composition's pipeline. + Input *structpb.Struct `protobuf:"bytes,4,opt,name=input,proto3,oneof" json:"input,omitempty"` + // Optional context. Crossplane may pass arbitary contextual information to a + // Function. A Function may also return context in its RunFunctionResponse, + // and that context will be passed to subsequent Functions. Crossplane + // discards all context returned by the last Function in the pipeline. + Context *structpb.Struct `protobuf:"bytes,5,opt,name=context,proto3,oneof" json:"context,omitempty"` + // Optional extra resources that the Function required. + // Note that extra resources is a map to Resources, plural. + // The map key corresponds to the key in a RunFunctionResponse's + // extra_resources field. If a Function requested extra resources that + // did not exist, Crossplane sets the map key to an empty Resources message to + // indicate that it attempted to satisfy the request. + ExtraResources map[string]*Resources `protobuf:"bytes,6,rep,name=extra_resources,json=extraResources,proto3" json:"extra_resources,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Optional credentials that this Function may use to communicate with an + // external system. 
+ Credentials map[string]*Credentials `protobuf:"bytes,7,rep,name=credentials,proto3" json:"credentials,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *RunFunctionRequest) Reset() { + *x = RunFunctionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RunFunctionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RunFunctionRequest) ProtoMessage() {} + +func (x *RunFunctionRequest) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RunFunctionRequest.ProtoReflect.Descriptor instead. 
+func (*RunFunctionRequest) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{0} +} + +func (x *RunFunctionRequest) GetMeta() *RequestMeta { + if x != nil { + return x.Meta + } + return nil +} + +func (x *RunFunctionRequest) GetObserved() *State { + if x != nil { + return x.Observed + } + return nil +} + +func (x *RunFunctionRequest) GetDesired() *State { + if x != nil { + return x.Desired + } + return nil +} + +func (x *RunFunctionRequest) GetInput() *structpb.Struct { + if x != nil { + return x.Input + } + return nil +} + +func (x *RunFunctionRequest) GetContext() *structpb.Struct { + if x != nil { + return x.Context + } + return nil +} + +func (x *RunFunctionRequest) GetExtraResources() map[string]*Resources { + if x != nil { + return x.ExtraResources + } + return nil +} + +func (x *RunFunctionRequest) GetCredentials() map[string]*Credentials { + if x != nil { + return x.Credentials + } + return nil +} + +// Credentials that a Function may use to communicate with an external system. +type Credentials struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Source of the credentials. 
+ // + // Types that are assignable to Source: + // + // *Credentials_CredentialData + Source isCredentials_Source `protobuf_oneof:"source"` +} + +func (x *Credentials) Reset() { + *x = Credentials{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Credentials) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Credentials) ProtoMessage() {} + +func (x *Credentials) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Credentials.ProtoReflect.Descriptor instead. +func (*Credentials) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{1} +} + +func (m *Credentials) GetSource() isCredentials_Source { + if m != nil { + return m.Source + } + return nil +} + +func (x *Credentials) GetCredentialData() *CredentialData { + if x, ok := x.GetSource().(*Credentials_CredentialData); ok { + return x.CredentialData + } + return nil +} + +type isCredentials_Source interface { + isCredentials_Source() +} + +type Credentials_CredentialData struct { + // Credential data loaded by Crossplane, for example from a Secret. + CredentialData *CredentialData `protobuf:"bytes,1,opt,name=credential_data,json=credentialData,proto3,oneof"` +} + +func (*Credentials_CredentialData) isCredentials_Source() {} + +// CredentialData loaded by Crossplane, for example from a Secret. 
+type CredentialData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Data map[string][]byte `protobuf:"bytes,1,rep,name=data,proto3" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *CredentialData) Reset() { + *x = CredentialData{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CredentialData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CredentialData) ProtoMessage() {} + +func (x *CredentialData) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CredentialData.ProtoReflect.Descriptor instead. +func (*CredentialData) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{2} +} + +func (x *CredentialData) GetData() map[string][]byte { + if x != nil { + return x.Data + } + return nil +} + +// Resources represents the state of several Crossplane resources. 
+type Resources struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Items []*Resource `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` +} + +func (x *Resources) Reset() { + *x = Resources{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Resources) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Resources) ProtoMessage() {} + +func (x *Resources) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Resources.ProtoReflect.Descriptor instead. +func (*Resources) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{3} +} + +func (x *Resources) GetItems() []*Resource { + if x != nil { + return x.Items + } + return nil +} + +// A RunFunctionResponse contains the result of a Composition Function run. +type RunFunctionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Metadata pertaining to this response. + Meta *ResponseMeta `protobuf:"bytes,1,opt,name=meta,proto3" json:"meta,omitempty"` + // Desired state according to a Function pipeline. Functions may add desired + // state, and may mutate or delete any part of the desired state they are + // concerned with. A Function must pass through any part of the desired state + // that it is not concerned with. 
+ // + // Note that the desired state must be a partial object with only the fields + // that this function (and its predecessors in the pipeline) wants to have + // set in the object. Copying a non-partial observed state to desired is most + // likely not what you want to do. Leaving out fields that had been returned + // as desired before will result in them being deleted from the objects in the + // cluster. + Desired *State `protobuf:"bytes,2,opt,name=desired,proto3" json:"desired,omitempty"` + // Results of the Function run. Results are used for observability purposes. + Results []*Result `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"` + // Optional context to be passed to the next Function in the pipeline as part + // of the RunFunctionRequest. Dropped on the last function in the pipeline. + Context *structpb.Struct `protobuf:"bytes,4,opt,name=context,proto3,oneof" json:"context,omitempty"` + // Requirements that must be satisfied for this Function to run successfully. + Requirements *Requirements `protobuf:"bytes,5,opt,name=requirements,proto3" json:"requirements,omitempty"` + // Status conditions to be applied to the composite resource. Conditions may also + // optionally be applied to the composite resource's associated claim. 
+ Conditions []*Condition `protobuf:"bytes,6,rep,name=conditions,proto3" json:"conditions,omitempty"` +} + +func (x *RunFunctionResponse) Reset() { + *x = RunFunctionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RunFunctionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RunFunctionResponse) ProtoMessage() {} + +func (x *RunFunctionResponse) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RunFunctionResponse.ProtoReflect.Descriptor instead. +func (*RunFunctionResponse) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{4} +} + +func (x *RunFunctionResponse) GetMeta() *ResponseMeta { + if x != nil { + return x.Meta + } + return nil +} + +func (x *RunFunctionResponse) GetDesired() *State { + if x != nil { + return x.Desired + } + return nil +} + +func (x *RunFunctionResponse) GetResults() []*Result { + if x != nil { + return x.Results + } + return nil +} + +func (x *RunFunctionResponse) GetContext() *structpb.Struct { + if x != nil { + return x.Context + } + return nil +} + +func (x *RunFunctionResponse) GetRequirements() *Requirements { + if x != nil { + return x.Requirements + } + return nil +} + +func (x *RunFunctionResponse) GetConditions() []*Condition { + if x != nil { + return x.Conditions + } + return nil +} + +// RequestMeta contains metadata pertaining to a RunFunctionRequest. 
+type RequestMeta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An opaque string identifying the content of the request. Two identical + // requests should have the same tag. + Tag string `protobuf:"bytes,1,opt,name=tag,proto3" json:"tag,omitempty"` +} + +func (x *RequestMeta) Reset() { + *x = RequestMeta{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestMeta) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestMeta) ProtoMessage() {} + +func (x *RequestMeta) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestMeta.ProtoReflect.Descriptor instead. +func (*RequestMeta) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{5} +} + +func (x *RequestMeta) GetTag() string { + if x != nil { + return x.Tag + } + return "" +} + +// Requirements that must be satisfied for a Function to run successfully. +type Requirements struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Extra resources that this Function requires. + // The map key uniquely identifies the group of resources. 
+ ExtraResources map[string]*ResourceSelector `protobuf:"bytes,1,rep,name=extra_resources,json=extraResources,proto3" json:"extra_resources,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Requirements) Reset() { + *x = Requirements{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Requirements) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Requirements) ProtoMessage() {} + +func (x *Requirements) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Requirements.ProtoReflect.Descriptor instead. +func (*Requirements) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{6} +} + +func (x *Requirements) GetExtraResources() map[string]*ResourceSelector { + if x != nil { + return x.ExtraResources + } + return nil +} + +// ResourceSelector selects a group of resources, either by name or by label. +type ResourceSelector struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // API version of resources to select. + ApiVersion string `protobuf:"bytes,1,opt,name=api_version,json=apiVersion,proto3" json:"api_version,omitempty"` + // Kind of resources to select. + Kind string `protobuf:"bytes,2,opt,name=kind,proto3" json:"kind,omitempty"` + // Resources to match. 
+ // + // Types that are assignable to Match: + // + // *ResourceSelector_MatchName + // *ResourceSelector_MatchLabels + Match isResourceSelector_Match `protobuf_oneof:"match"` +} + +func (x *ResourceSelector) Reset() { + *x = ResourceSelector{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceSelector) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceSelector) ProtoMessage() {} + +func (x *ResourceSelector) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceSelector.ProtoReflect.Descriptor instead. +func (*ResourceSelector) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{7} +} + +func (x *ResourceSelector) GetApiVersion() string { + if x != nil { + return x.ApiVersion + } + return "" +} + +func (x *ResourceSelector) GetKind() string { + if x != nil { + return x.Kind + } + return "" +} + +func (m *ResourceSelector) GetMatch() isResourceSelector_Match { + if m != nil { + return m.Match + } + return nil +} + +func (x *ResourceSelector) GetMatchName() string { + if x, ok := x.GetMatch().(*ResourceSelector_MatchName); ok { + return x.MatchName + } + return "" +} + +func (x *ResourceSelector) GetMatchLabels() *MatchLabels { + if x, ok := x.GetMatch().(*ResourceSelector_MatchLabels); ok { + return x.MatchLabels + } + return nil +} + +type isResourceSelector_Match interface { + isResourceSelector_Match() +} + +type ResourceSelector_MatchName struct { + // Match the resource with this name. 
+ MatchName string `protobuf:"bytes,3,opt,name=match_name,json=matchName,proto3,oneof"` +} + +type ResourceSelector_MatchLabels struct { + // Match all resources with these labels. + MatchLabels *MatchLabels `protobuf:"bytes,4,opt,name=match_labels,json=matchLabels,proto3,oneof"` +} + +func (*ResourceSelector_MatchName) isResourceSelector_Match() {} + +func (*ResourceSelector_MatchLabels) isResourceSelector_Match() {} + +// MatchLabels defines a set of labels to match resources against. +type MatchLabels struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Labels map[string]string `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *MatchLabels) Reset() { + *x = MatchLabels{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MatchLabels) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MatchLabels) ProtoMessage() {} + +func (x *MatchLabels) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MatchLabels.ProtoReflect.Descriptor instead. +func (*MatchLabels) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{8} +} + +func (x *MatchLabels) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +// ResponseMeta contains metadata pertaining to a RunFunctionResponse. 
+type ResponseMeta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An opaque string identifying the content of the request. Must match the + // meta.tag of the corresponding RunFunctionRequest. + Tag string `protobuf:"bytes,1,opt,name=tag,proto3" json:"tag,omitempty"` + // Time-to-live of this response. Deterministic Functions with no side-effects + // (e.g. simple templating Functions) may specify a TTL. Crossplane may choose + // to cache responses until the TTL expires. + Ttl *durationpb.Duration `protobuf:"bytes,2,opt,name=ttl,proto3,oneof" json:"ttl,omitempty"` +} + +func (x *ResponseMeta) Reset() { + *x = ResponseMeta{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResponseMeta) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResponseMeta) ProtoMessage() {} + +func (x *ResponseMeta) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResponseMeta.ProtoReflect.Descriptor instead. +func (*ResponseMeta) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{9} +} + +func (x *ResponseMeta) GetTag() string { + if x != nil { + return x.Tag + } + return "" +} + +func (x *ResponseMeta) GetTtl() *durationpb.Duration { + if x != nil { + return x.Ttl + } + return nil +} + +// State of the composite resource (XR) and any composed resources. 
+type State struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The state of the composite resource (XR). + Composite *Resource `protobuf:"bytes,1,opt,name=composite,proto3" json:"composite,omitempty"` + // The state of any composed resources. + Resources map[string]*Resource `protobuf:"bytes,2,rep,name=resources,proto3" json:"resources,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *State) Reset() { + *x = State{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *State) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*State) ProtoMessage() {} + +func (x *State) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use State.ProtoReflect.Descriptor instead. +func (*State) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{10} +} + +func (x *State) GetComposite() *Resource { + if x != nil { + return x.Composite + } + return nil +} + +func (x *State) GetResources() map[string]*Resource { + if x != nil { + return x.Resources + } + return nil +} + +// A Resource represents the state of a composite or composed resource. +type Resource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The JSON representation of the resource. 
+ // + // - Crossplane will set this field in a RunFunctionRequest to the entire + // observed state of a resource - including its metadata, spec, and status. + // + // - A Function should set this field in a RunFunctionRequest to communicate + // the desired state of a composite or composed resource. + // + // - A Function may only specify the desired status of a composite resource - + // not its metadata or spec. A Function should not return desired metadata + // or spec for a composite resource. This will be ignored. + // + // - A Function may not specify the desired status of a composed resource - + // only its metadata and spec. A Function should not return desired status + // for a composed resource. This will be ignored. + Resource *structpb.Struct `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` + // The resource's connection details. + // + // - Crossplane will set this field in a RunFunctionRequest to communicate the + // the observed connection details of a composite or composed resource. + // + // - A Function should set this field in a RunFunctionResponse to indicate the + // desired connection details of the composite resource. + // + // - A Function should not set this field in a RunFunctionResponse to indicate + // the desired connection details of a composed resource. This will be + // ignored. + ConnectionDetails map[string][]byte `protobuf:"bytes,2,rep,name=connection_details,json=connectionDetails,proto3" json:"connection_details,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Ready indicates whether the resource should be considered ready. + // + // * Crossplane will never set this field in a RunFunctionRequest. + // + // - A Function should set this field to READY_TRUE in a RunFunctionResponse + // to indicate that a desired composed resource is ready. 
+ // + // - A Function should not set this field in a RunFunctionResponse to indicate + // that the desired composite resource is ready. This will be ignored. + Ready Ready `protobuf:"varint,3,opt,name=ready,proto3,enum=apiextensions.fn.proto.v1.Ready" json:"ready,omitempty"` +} + +func (x *Resource) Reset() { + *x = Resource{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Resource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Resource) ProtoMessage() {} + +func (x *Resource) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Resource.ProtoReflect.Descriptor instead. +func (*Resource) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{11} +} + +func (x *Resource) GetResource() *structpb.Struct { + if x != nil { + return x.Resource + } + return nil +} + +func (x *Resource) GetConnectionDetails() map[string][]byte { + if x != nil { + return x.ConnectionDetails + } + return nil +} + +func (x *Resource) GetReady() Ready { + if x != nil { + return x.Ready + } + return Ready_READY_UNSPECIFIED +} + +// A Result of running a Function. +type Result struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Severity of this result. + Severity Severity `protobuf:"varint,1,opt,name=severity,proto3,enum=apiextensions.fn.proto.v1.Severity" json:"severity,omitempty"` + // Human-readable details about the result. 
+ Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // Optional PascalCase, machine-readable reason for this result. If omitted, + // the value will be ComposeResources. + Reason *string `protobuf:"bytes,3,opt,name=reason,proto3,oneof" json:"reason,omitempty"` + // The resources this result targets. + Target *Target `protobuf:"varint,4,opt,name=target,proto3,enum=apiextensions.fn.proto.v1.Target,oneof" json:"target,omitempty"` +} + +func (x *Result) Reset() { + *x = Result{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Result) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Result) ProtoMessage() {} + +func (x *Result) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Result.ProtoReflect.Descriptor instead. +func (*Result) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{12} +} + +func (x *Result) GetSeverity() Severity { + if x != nil { + return x.Severity + } + return Severity_SEVERITY_UNSPECIFIED +} + +func (x *Result) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *Result) GetReason() string { + if x != nil && x.Reason != nil { + return *x.Reason + } + return "" +} + +func (x *Result) GetTarget() Target { + if x != nil && x.Target != nil { + return *x.Target + } + return Target_TARGET_UNSPECIFIED +} + +// Status condition to be applied to the composite resource. Condition may also +// optionally be applied to the composite resource's associated claim. 
For +// detailed information on proper usage of status conditions, please see +// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties. +type Condition struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Type of condition in PascalCase. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Status of the condition. + Status Status `protobuf:"varint,2,opt,name=status,proto3,enum=apiextensions.fn.proto.v1.Status" json:"status,omitempty"` + // Reason contains a programmatic identifier indicating the reason for the + // condition's last transition. Producers of specific condition types may + // define expected values and meanings for this field, and whether the values + // are considered a guaranteed API. The value should be a PascalCase string. + // This field may not be empty. + Reason string `protobuf:"bytes,3,opt,name=reason,proto3" json:"reason,omitempty"` + // Message is a human readable message indicating details about the + // transition. This may be an empty string. + Message *string `protobuf:"bytes,4,opt,name=message,proto3,oneof" json:"message,omitempty"` + // The resources this condition targets. 
+ Target *Target `protobuf:"varint,5,opt,name=target,proto3,enum=apiextensions.fn.proto.v1.Target,oneof" json:"target,omitempty"` +} + +func (x *Condition) Reset() { + *x = Condition{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Condition) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Condition) ProtoMessage() {} + +func (x *Condition) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Condition.ProtoReflect.Descriptor instead. +func (*Condition) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP(), []int{13} +} + +func (x *Condition) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Condition) GetStatus() Status { + if x != nil { + return x.Status + } + return Status_STATUS_CONDITION_UNSPECIFIED +} + +func (x *Condition) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +func (x *Condition) GetMessage() string { + if x != nil && x.Message != nil { + return *x.Message + } + return "" +} + +func (x *Condition) GetTarget() Target { + if x != nil && x.Target != nil { + return *x.Target + } + return Target_TARGET_UNSPECIFIED +} + +var File_apiextensions_fn_proto_v1_run_function_proto protoreflect.FileDescriptor + +var file_apiextensions_fn_proto_v1_run_function_proto_rawDesc = []byte{ + 0x0a, 0x2c, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x66, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x75, 0x6e, 0x5f, + 0x66, 0x75, 
0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, + 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xeb, 0x05, 0x0a, 0x12, 0x52, 0x75, 0x6e, 0x46, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3a, + 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x61, + 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x4d, 0x65, 0x74, 0x61, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x3c, 0x0a, 0x08, 0x6f, 0x62, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x61, + 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x08, + 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x12, 0x3a, 0x0a, 0x07, 0x64, 0x65, 0x73, 0x69, + 0x72, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x61, 0x70, 0x69, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x07, 0x64, 0x65, 0x73, + 0x69, 0x72, 0x65, 0x64, 0x12, 0x32, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x05, + 0x69, 0x6e, 0x70, 0x75, 0x74, 0x88, 0x01, 0x01, 0x12, 0x36, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, + 0x63, 0x74, 0x48, 0x01, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x88, 0x01, 0x01, + 0x12, 0x6a, 0x0a, 0x0f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x61, 0x70, 0x69, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x65, 0x78, + 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x60, 0x0a, 0x0b, + 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x3e, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x75, + 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0b, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x1a, 0x67, + 0x0a, 0x13, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 
0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x66, 0x0a, 0x10, 0x43, 0x72, 0x65, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3c, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x61, + 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, + 0x08, 0x0a, 0x06, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x6d, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x73, 0x12, 0x54, 0x0a, 0x0f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x61, 0x6c, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, + 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x72, 0x65, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x42, 0x08, 0x0a, 0x06, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x22, 0x92, 0x01, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x44, 
0x61, 0x74, 0x61, 0x12, 0x47, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, + 0x31, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, + 0x2e, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, + 0x1a, 0x37, 0x0a, 0x09, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x46, 0x0a, 0x09, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, + 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, + 0x73, 0x22, 0xa2, 0x03, 0x0a, 0x13, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x04, 0x6d, 0x65, 0x74, + 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, + 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x3a, 0x0a, 0x07, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x07, 0x64, 0x65, 0x73, 0x69, 0x72, + 0x65, 0x64, 0x12, 0x3b, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, + 0x36, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x07, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x78, 0x74, 0x88, 0x01, 0x01, 0x12, 0x4b, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x69, + 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, + 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x12, 0x44, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, + 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x1f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x61, 0x67, 0x18, 0x01, 0x20, 0x01, + 
0x28, 0x09, 0x52, 0x03, 0x74, 0x61, 0x67, 0x22, 0xe4, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x71, 0x75, + 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x64, 0x0a, 0x0f, 0x65, 0x78, 0x74, 0x72, + 0x61, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x3b, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x45, 0x78, 0x74, 0x72, 0x61, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, + 0x65, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x1a, 0x6e, + 0x0a, 0x13, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x41, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xbe, + 0x01, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x1f, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 
0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x0c, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x0b, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x42, 0x07, 0x0a, 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, + 0x94, 0x01, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, + 0x4a, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x32, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x5a, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x61, 0x67, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x61, 0x67, 0x12, 0x30, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 
0x74, 0x69, 0x6f, 0x6e, + 0x48, 0x00, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x88, 0x01, 0x01, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x74, + 0x74, 0x6c, 0x22, 0xfc, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x41, 0x0a, 0x09, + 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x65, 0x12, + 0x4d, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x1a, 0x61, + 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x23, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0xa8, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x33, + 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 
0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x12, 0x69, 0x0a, 0x12, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x3a, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x36, + 0x0a, 0x05, 0x72, 0x65, 0x61, 0x64, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, + 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x79, 0x52, + 0x05, 0x72, 0x65, 0x61, 0x64, 0x79, 0x1a, 0x44, 0x0a, 0x16, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd6, 0x01, 0x0a, + 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x3f, 0x0a, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, + 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x61, 0x70, 0x69, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x52, 0x08, + 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 
0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, + 0x3e, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x21, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x48, 0x01, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x88, 0x01, 0x01, 0x42, + 0x09, 0x0a, 0x07, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x22, 0xe8, 0x01, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x07, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x07, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, 0x12, 0x3e, 0x0a, 0x06, 0x74, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x61, 0x70, 0x69, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 
0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x48, 0x01, 0x52, 0x06, + 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x88, 0x01, 0x01, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x2a, 0x3f, 0x0a, 0x05, 0x52, 0x65, 0x61, 0x64, 0x79, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x41, + 0x44, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x0e, 0x0a, 0x0a, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x54, 0x52, 0x55, 0x45, 0x10, 0x01, + 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x46, 0x41, 0x4c, 0x53, 0x45, 0x10, + 0x02, 0x2a, 0x63, 0x0a, 0x08, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, + 0x14, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x45, 0x56, 0x45, 0x52, + 0x49, 0x54, 0x59, 0x5f, 0x46, 0x41, 0x54, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, + 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x57, 0x41, 0x52, 0x4e, 0x49, 0x4e, 0x47, 0x10, + 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x4f, + 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x03, 0x2a, 0x56, 0x0a, 0x06, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, + 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, + 0x45, 0x54, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x53, 0x49, 0x54, 0x45, 0x10, 0x01, 0x12, 0x1e, + 0x0a, 0x1a, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x53, 0x49, + 0x54, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x43, 0x4c, 0x41, 0x49, 0x4d, 0x10, 0x02, 0x2a, 0x7f, + 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x20, 0x0a, 0x1c, 0x53, 0x54, 0x41, 0x54, + 0x55, 0x53, 0x5f, 0x43, 
0x4f, 0x4e, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, + 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x54, + 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x4e, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x54, 0x41, 0x54, + 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x4e, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x52, 0x55, + 0x45, 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, + 0x4e, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x41, 0x4c, 0x53, 0x45, 0x10, 0x03, 0x32, + 0x87, 0x01, 0x0a, 0x15, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x75, 0x6e, 0x6e, + 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x6e, 0x0a, 0x0b, 0x52, 0x75, 0x6e, + 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x41, 0x5a, 0x3f, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x2f, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x61, 0x70, + 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x66, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + 
file_apiextensions_fn_proto_v1_run_function_proto_rawDescOnce sync.Once + file_apiextensions_fn_proto_v1_run_function_proto_rawDescData = file_apiextensions_fn_proto_v1_run_function_proto_rawDesc +) + +func file_apiextensions_fn_proto_v1_run_function_proto_rawDescGZIP() []byte { + file_apiextensions_fn_proto_v1_run_function_proto_rawDescOnce.Do(func() { + file_apiextensions_fn_proto_v1_run_function_proto_rawDescData = protoimpl.X.CompressGZIP(file_apiextensions_fn_proto_v1_run_function_proto_rawDescData) + }) + return file_apiextensions_fn_proto_v1_run_function_proto_rawDescData +} + +var file_apiextensions_fn_proto_v1_run_function_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var file_apiextensions_fn_proto_v1_run_function_proto_msgTypes = make([]protoimpl.MessageInfo, 21) +var file_apiextensions_fn_proto_v1_run_function_proto_goTypes = []any{ + (Ready)(0), // 0: apiextensions.fn.proto.v1.Ready + (Severity)(0), // 1: apiextensions.fn.proto.v1.Severity + (Target)(0), // 2: apiextensions.fn.proto.v1.Target + (Status)(0), // 3: apiextensions.fn.proto.v1.Status + (*RunFunctionRequest)(nil), // 4: apiextensions.fn.proto.v1.RunFunctionRequest + (*Credentials)(nil), // 5: apiextensions.fn.proto.v1.Credentials + (*CredentialData)(nil), // 6: apiextensions.fn.proto.v1.CredentialData + (*Resources)(nil), // 7: apiextensions.fn.proto.v1.Resources + (*RunFunctionResponse)(nil), // 8: apiextensions.fn.proto.v1.RunFunctionResponse + (*RequestMeta)(nil), // 9: apiextensions.fn.proto.v1.RequestMeta + (*Requirements)(nil), // 10: apiextensions.fn.proto.v1.Requirements + (*ResourceSelector)(nil), // 11: apiextensions.fn.proto.v1.ResourceSelector + (*MatchLabels)(nil), // 12: apiextensions.fn.proto.v1.MatchLabels + (*ResponseMeta)(nil), // 13: apiextensions.fn.proto.v1.ResponseMeta + (*State)(nil), // 14: apiextensions.fn.proto.v1.State + (*Resource)(nil), // 15: apiextensions.fn.proto.v1.Resource + (*Result)(nil), // 16: apiextensions.fn.proto.v1.Result + (*Condition)(nil), // 
17: apiextensions.fn.proto.v1.Condition + nil, // 18: apiextensions.fn.proto.v1.RunFunctionRequest.ExtraResourcesEntry + nil, // 19: apiextensions.fn.proto.v1.RunFunctionRequest.CredentialsEntry + nil, // 20: apiextensions.fn.proto.v1.CredentialData.DataEntry + nil, // 21: apiextensions.fn.proto.v1.Requirements.ExtraResourcesEntry + nil, // 22: apiextensions.fn.proto.v1.MatchLabels.LabelsEntry + nil, // 23: apiextensions.fn.proto.v1.State.ResourcesEntry + nil, // 24: apiextensions.fn.proto.v1.Resource.ConnectionDetailsEntry + (*structpb.Struct)(nil), // 25: google.protobuf.Struct + (*durationpb.Duration)(nil), // 26: google.protobuf.Duration +} +var file_apiextensions_fn_proto_v1_run_function_proto_depIdxs = []int32{ + 9, // 0: apiextensions.fn.proto.v1.RunFunctionRequest.meta:type_name -> apiextensions.fn.proto.v1.RequestMeta + 14, // 1: apiextensions.fn.proto.v1.RunFunctionRequest.observed:type_name -> apiextensions.fn.proto.v1.State + 14, // 2: apiextensions.fn.proto.v1.RunFunctionRequest.desired:type_name -> apiextensions.fn.proto.v1.State + 25, // 3: apiextensions.fn.proto.v1.RunFunctionRequest.input:type_name -> google.protobuf.Struct + 25, // 4: apiextensions.fn.proto.v1.RunFunctionRequest.context:type_name -> google.protobuf.Struct + 18, // 5: apiextensions.fn.proto.v1.RunFunctionRequest.extra_resources:type_name -> apiextensions.fn.proto.v1.RunFunctionRequest.ExtraResourcesEntry + 19, // 6: apiextensions.fn.proto.v1.RunFunctionRequest.credentials:type_name -> apiextensions.fn.proto.v1.RunFunctionRequest.CredentialsEntry + 6, // 7: apiextensions.fn.proto.v1.Credentials.credential_data:type_name -> apiextensions.fn.proto.v1.CredentialData + 20, // 8: apiextensions.fn.proto.v1.CredentialData.data:type_name -> apiextensions.fn.proto.v1.CredentialData.DataEntry + 15, // 9: apiextensions.fn.proto.v1.Resources.items:type_name -> apiextensions.fn.proto.v1.Resource + 13, // 10: apiextensions.fn.proto.v1.RunFunctionResponse.meta:type_name -> 
apiextensions.fn.proto.v1.ResponseMeta + 14, // 11: apiextensions.fn.proto.v1.RunFunctionResponse.desired:type_name -> apiextensions.fn.proto.v1.State + 16, // 12: apiextensions.fn.proto.v1.RunFunctionResponse.results:type_name -> apiextensions.fn.proto.v1.Result + 25, // 13: apiextensions.fn.proto.v1.RunFunctionResponse.context:type_name -> google.protobuf.Struct + 10, // 14: apiextensions.fn.proto.v1.RunFunctionResponse.requirements:type_name -> apiextensions.fn.proto.v1.Requirements + 17, // 15: apiextensions.fn.proto.v1.RunFunctionResponse.conditions:type_name -> apiextensions.fn.proto.v1.Condition + 21, // 16: apiextensions.fn.proto.v1.Requirements.extra_resources:type_name -> apiextensions.fn.proto.v1.Requirements.ExtraResourcesEntry + 12, // 17: apiextensions.fn.proto.v1.ResourceSelector.match_labels:type_name -> apiextensions.fn.proto.v1.MatchLabels + 22, // 18: apiextensions.fn.proto.v1.MatchLabels.labels:type_name -> apiextensions.fn.proto.v1.MatchLabels.LabelsEntry + 26, // 19: apiextensions.fn.proto.v1.ResponseMeta.ttl:type_name -> google.protobuf.Duration + 15, // 20: apiextensions.fn.proto.v1.State.composite:type_name -> apiextensions.fn.proto.v1.Resource + 23, // 21: apiextensions.fn.proto.v1.State.resources:type_name -> apiextensions.fn.proto.v1.State.ResourcesEntry + 25, // 22: apiextensions.fn.proto.v1.Resource.resource:type_name -> google.protobuf.Struct + 24, // 23: apiextensions.fn.proto.v1.Resource.connection_details:type_name -> apiextensions.fn.proto.v1.Resource.ConnectionDetailsEntry + 0, // 24: apiextensions.fn.proto.v1.Resource.ready:type_name -> apiextensions.fn.proto.v1.Ready + 1, // 25: apiextensions.fn.proto.v1.Result.severity:type_name -> apiextensions.fn.proto.v1.Severity + 2, // 26: apiextensions.fn.proto.v1.Result.target:type_name -> apiextensions.fn.proto.v1.Target + 3, // 27: apiextensions.fn.proto.v1.Condition.status:type_name -> apiextensions.fn.proto.v1.Status + 2, // 28: apiextensions.fn.proto.v1.Condition.target:type_name 
-> apiextensions.fn.proto.v1.Target + 7, // 29: apiextensions.fn.proto.v1.RunFunctionRequest.ExtraResourcesEntry.value:type_name -> apiextensions.fn.proto.v1.Resources + 5, // 30: apiextensions.fn.proto.v1.RunFunctionRequest.CredentialsEntry.value:type_name -> apiextensions.fn.proto.v1.Credentials + 11, // 31: apiextensions.fn.proto.v1.Requirements.ExtraResourcesEntry.value:type_name -> apiextensions.fn.proto.v1.ResourceSelector + 15, // 32: apiextensions.fn.proto.v1.State.ResourcesEntry.value:type_name -> apiextensions.fn.proto.v1.Resource + 4, // 33: apiextensions.fn.proto.v1.FunctionRunnerService.RunFunction:input_type -> apiextensions.fn.proto.v1.RunFunctionRequest + 8, // 34: apiextensions.fn.proto.v1.FunctionRunnerService.RunFunction:output_type -> apiextensions.fn.proto.v1.RunFunctionResponse + 34, // [34:35] is the sub-list for method output_type + 33, // [33:34] is the sub-list for method input_type + 33, // [33:33] is the sub-list for extension type_name + 33, // [33:33] is the sub-list for extension extendee + 0, // [0:33] is the sub-list for field type_name +} + +func init() { file_apiextensions_fn_proto_v1_run_function_proto_init() } +func file_apiextensions_fn_proto_v1_run_function_proto_init() { + if File_apiextensions_fn_proto_v1_run_function_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*RunFunctionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*Credentials); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := 
v.(*CredentialData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*Resources); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*RunFunctionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*RequestMeta); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*Requirements); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*ResourceSelector); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*MatchLabels); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*ResponseMeta); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + 
return nil + } + } + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[10].Exporter = func(v any, i int) any { + switch v := v.(*State); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[11].Exporter = func(v any, i int) any { + switch v := v.(*Resource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[12].Exporter = func(v any, i int) any { + switch v := v.(*Result); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[13].Exporter = func(v any, i int) any { + switch v := v.(*Condition); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[0].OneofWrappers = []any{} + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[1].OneofWrappers = []any{ + (*Credentials_CredentialData)(nil), + } + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[4].OneofWrappers = []any{} + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[7].OneofWrappers = []any{ + (*ResourceSelector_MatchName)(nil), + (*ResourceSelector_MatchLabels)(nil), + } + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[9].OneofWrappers = []any{} + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[12].OneofWrappers = []any{} + file_apiextensions_fn_proto_v1_run_function_proto_msgTypes[13].OneofWrappers = []any{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: 
file_apiextensions_fn_proto_v1_run_function_proto_rawDesc, + NumEnums: 4, + NumMessages: 21, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_apiextensions_fn_proto_v1_run_function_proto_goTypes, + DependencyIndexes: file_apiextensions_fn_proto_v1_run_function_proto_depIdxs, + EnumInfos: file_apiextensions_fn_proto_v1_run_function_proto_enumTypes, + MessageInfos: file_apiextensions_fn_proto_v1_run_function_proto_msgTypes, + }.Build() + File_apiextensions_fn_proto_v1_run_function_proto = out.File + file_apiextensions_fn_proto_v1_run_function_proto_rawDesc = nil + file_apiextensions_fn_proto_v1_run_function_proto_goTypes = nil + file_apiextensions_fn_proto_v1_run_function_proto_depIdxs = nil +} diff --git a/apis/apiextensions/fn/proto/v1beta1/run_function.proto b/apis/apiextensions/fn/proto/v1/run_function.proto similarity index 75% rename from apis/apiextensions/fn/proto/v1beta1/run_function.proto rename to apis/apiextensions/fn/proto/v1/run_function.proto index 0f53ef19f..b66e970fd 100644 --- a/apis/apiextensions/fn/proto/v1beta1/run_function.proto +++ b/apis/apiextensions/fn/proto/v1/run_function.proto @@ -19,9 +19,9 @@ syntax = "proto3"; import "google/protobuf/struct.proto"; import "google/protobuf/duration.proto"; -package apiextensions.fn.proto.v1beta1; +package apiextensions.fn.proto.v1; -option go_package = "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1beta1"; +option go_package = "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1"; // A FunctionRunnerService is a Composition Function. service FunctionRunnerService { @@ -68,6 +68,24 @@ message RunFunctionRequest { // did not exist, Crossplane sets the map key to an empty Resources message to // indicate that it attempted to satisfy the request. map extra_resources = 6; + + // Optional credentials that this Function may use to communicate with an + // external system. 
+ map credentials = 7; +} + +// Credentials that a Function may use to communicate with an external system. +message Credentials { + // Source of the credentials. + oneof source { + // Credential data loaded by Crossplane, for example from a Secret. + CredentialData credential_data = 1; + } +} + +// CredentialData loaded by Crossplane, for example from a Secret. +message CredentialData { + map data = 1; } // Resources represents the state of several Crossplane resources. @@ -103,6 +121,10 @@ message RunFunctionResponse { // Requirements that must be satisfied for this Function to run successfully. Requirements requirements = 5; + + // Status conditions to be applied to the composite resource. Conditions may also + // optionally be applied to the composite resource's associated claim. + repeated Condition conditions = 6; } // RequestMeta contains metadata pertaining to a RunFunctionRequest. @@ -121,11 +143,18 @@ message Requirements { // ResourceSelector selects a group of resources, either by name or by label. message ResourceSelector { + // API version of resources to select. string api_version = 1; + + // Kind of resources to select. string kind = 2; + // Resources to match. oneof match { + // Match the resource with this name. string match_name = 3; + + // Match all resources with these labels. MatchLabels match_labels = 4; } } @@ -218,6 +247,13 @@ message Result { // Human-readable details about the result. string message = 2; + + // Optional PascalCase, machine-readable reason for this result. If omitted, + // the value will be ComposeResources. + optional string reason = 3; + + // The resources this result targets. + optional Target target = 4; } // Severity of Function results. @@ -237,4 +273,54 @@ enum Severity { // Normal results are emitted as normal events and debug logs associated // with the composite resource. SEVERITY_NORMAL = 3; -} \ No newline at end of file +} + +// Target of Function results and conditions. 
+enum Target { + // If the target is unspecified, the result targets the composite resource. + TARGET_UNSPECIFIED = 0; + + // Target the composite resource. Results that target the composite resource + // should include detailed, advanced information. + TARGET_COMPOSITE = 1; + + // Target the composite and the claim. Results that target the composite and + // the claim should include only end-user friendly information. + TARGET_COMPOSITE_AND_CLAIM = 2; +} + +// Status condition to be applied to the composite resource. Condition may also +// optionally be applied to the composite resource's associated claim. For +// detailed information on proper usage of status conditions, please see +// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties. +message Condition { + // Type of condition in PascalCase. + string type = 1; + + // Status of the condition. + Status status = 2; + + // Reason contains a programmatic identifier indicating the reason for the + // condition's last transition. Producers of specific condition types may + // define expected values and meanings for this field, and whether the values + // are considered a guaranteed API. The value should be a PascalCase string. + // This field may not be empty. + string reason = 3; + + // Message is a human readable message indicating details about the + // transition. This may be an empty string. + optional string message = 4; + + // The resources this condition targets. 
+ optional Target target = 5; +} + +enum Status { + STATUS_CONDITION_UNSPECIFIED = 0; + + STATUS_CONDITION_UNKNOWN = 1; + + STATUS_CONDITION_TRUE = 2; + + STATUS_CONDITION_FALSE = 3; +} diff --git a/apis/apiextensions/fn/proto/v1beta1/run_function_grpc.pb.go b/apis/apiextensions/fn/proto/v1/run_function_grpc.pb.go similarity index 94% rename from apis/apiextensions/fn/proto/v1beta1/run_function_grpc.pb.go rename to apis/apiextensions/fn/proto/v1/run_function_grpc.pb.go index 5ea2c19d6..81d40b60b 100644 --- a/apis/apiextensions/fn/proto/v1beta1/run_function_grpc.pb.go +++ b/apis/apiextensions/fn/proto/v1/run_function_grpc.pb.go @@ -17,9 +17,9 @@ // versions: // - protoc-gen-go-grpc v1.3.0 // - protoc (unknown) -// source: apiextensions/fn/proto/v1beta1/run_function.proto +// source: apiextensions/fn/proto/v1/run_function.proto -package v1beta1 +package v1 import ( context "context" @@ -34,7 +34,7 @@ import ( const _ = grpc.SupportPackageIsVersion7 const ( - FunctionRunnerService_RunFunction_FullMethodName = "/apiextensions.fn.proto.v1beta1.FunctionRunnerService/RunFunction" + FunctionRunnerService_RunFunction_FullMethodName = "/apiextensions.fn.proto.v1.FunctionRunnerService/RunFunction" ) // FunctionRunnerServiceClient is the client API for FunctionRunnerService service. 
@@ -113,7 +113,7 @@ func _FunctionRunnerService_RunFunction_Handler(srv interface{}, ctx context.Con // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) var FunctionRunnerService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "apiextensions.fn.proto.v1beta1.FunctionRunnerService", + ServiceName: "apiextensions.fn.proto.v1.FunctionRunnerService", HandlerType: (*FunctionRunnerServiceServer)(nil), Methods: []grpc.MethodDesc{ { @@ -122,5 +122,5 @@ var FunctionRunnerService_ServiceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{}, - Metadata: "apiextensions/fn/proto/v1beta1/run_function.proto", + Metadata: "apiextensions/fn/proto/v1/run_function.proto", } diff --git a/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go b/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go deleted file mode 100644 index d68823ae6..000000000 --- a/apis/apiextensions/fn/proto/v1beta1/run_function.pb.go +++ /dev/null @@ -1,1360 +0,0 @@ -// -//Copyright 2022 The Crossplane Authors. -// -//Licensed under the Apache License, Version 2.0 (the "License"); -//you may not use this file except in compliance with the License. -//You may obtain a copy of the License at -// -//http://www.apache.org/licenses/LICENSE-2.0 -// -//Unless required by applicable law or agreed to in writing, software -//distributed under the License is distributed on an "AS IS" BASIS, -//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -//See the License for the specific language governing permissions and -//limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.31.0 -// protoc (unknown) -// source: apiextensions/fn/proto/v1beta1/run_function.proto - -package v1beta1 - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - durationpb "google.golang.org/protobuf/types/known/durationpb" - structpb "google.golang.org/protobuf/types/known/structpb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Ready indicates whether a composed resource should be considered ready. -type Ready int32 - -const ( - Ready_READY_UNSPECIFIED Ready = 0 - // True means the composed resource has been observed to be ready. - Ready_READY_TRUE Ready = 1 - // False means the composed resource has not been observed to be ready. - Ready_READY_FALSE Ready = 2 -) - -// Enum value maps for Ready. -var ( - Ready_name = map[int32]string{ - 0: "READY_UNSPECIFIED", - 1: "READY_TRUE", - 2: "READY_FALSE", - } - Ready_value = map[string]int32{ - "READY_UNSPECIFIED": 0, - "READY_TRUE": 1, - "READY_FALSE": 2, - } -) - -func (x Ready) Enum() *Ready { - p := new(Ready) - *p = x - return p -} - -func (x Ready) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Ready) Descriptor() protoreflect.EnumDescriptor { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_enumTypes[0].Descriptor() -} - -func (Ready) Type() protoreflect.EnumType { - return &file_apiextensions_fn_proto_v1beta1_run_function_proto_enumTypes[0] -} - -func (x Ready) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use Ready.Descriptor instead. 
-func (Ready) EnumDescriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{0} -} - -// Severity of Function results. -type Severity int32 - -const ( - Severity_SEVERITY_UNSPECIFIED Severity = 0 - // Fatal results are fatal; subsequent Composition Functions may run, but - // the Composition Function pipeline run will be considered a failure and - // the first fatal result will be returned as an error. - Severity_SEVERITY_FATAL Severity = 1 - // Warning results are non-fatal; the entire Composition will run to - // completion but warning events and debug logs associated with the - // composite resource will be emitted. - Severity_SEVERITY_WARNING Severity = 2 - // Normal results are emitted as normal events and debug logs associated - // with the composite resource. - Severity_SEVERITY_NORMAL Severity = 3 -) - -// Enum value maps for Severity. -var ( - Severity_name = map[int32]string{ - 0: "SEVERITY_UNSPECIFIED", - 1: "SEVERITY_FATAL", - 2: "SEVERITY_WARNING", - 3: "SEVERITY_NORMAL", - } - Severity_value = map[string]int32{ - "SEVERITY_UNSPECIFIED": 0, - "SEVERITY_FATAL": 1, - "SEVERITY_WARNING": 2, - "SEVERITY_NORMAL": 3, - } -) - -func (x Severity) Enum() *Severity { - p := new(Severity) - *p = x - return p -} - -func (x Severity) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Severity) Descriptor() protoreflect.EnumDescriptor { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_enumTypes[1].Descriptor() -} - -func (Severity) Type() protoreflect.EnumType { - return &file_apiextensions_fn_proto_v1beta1_run_function_proto_enumTypes[1] -} - -func (x Severity) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use Severity.Descriptor instead. 
-func (Severity) EnumDescriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{1} -} - -// A RunFunctionRequest requests that the Composition Function be run. -type RunFunctionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Metadata pertaining to this request. - Meta *RequestMeta `protobuf:"bytes,1,opt,name=meta,proto3" json:"meta,omitempty"` - // The observed state prior to invocation of a Function pipeline. State passed - // to each Function is fresh as of the time the pipeline was invoked, not as - // of the time each Function was invoked. - Observed *State `protobuf:"bytes,2,opt,name=observed,proto3" json:"observed,omitempty"` - // Desired state according to a Function pipeline. The state passed to a - // particular Function may have been accumulated by previous Functions in the - // pipeline. - // - // Note that the desired state must be a partial object with only the fields - // that this function (and its predecessors in the pipeline) wants to have - // set in the object. Copying a non-partial observed state to desired is most - // likely not what you want to do. Leaving out fields that had been returned - // as desired before will result in them being deleted from the objects in the - // cluster. - Desired *State `protobuf:"bytes,3,opt,name=desired,proto3" json:"desired,omitempty"` - // Optional input specific to this Function invocation. A JSON representation - // of the 'input' block of the relevant entry in a Composition's pipeline. - Input *structpb.Struct `protobuf:"bytes,4,opt,name=input,proto3,oneof" json:"input,omitempty"` - // Optional context. Crossplane may pass arbitary contextual information to a - // Function. A Function may also return context in its RunFunctionResponse, - // and that context will be passed to subsequent Functions. 
Crossplane - // discards all context returned by the last Function in the pipeline. - Context *structpb.Struct `protobuf:"bytes,5,opt,name=context,proto3,oneof" json:"context,omitempty"` - // Optional extra resources that the Function required. - // Note that extra resources is a map to Resources, plural. - // The map key corresponds to the key in a RunFunctionResponse's - // extra_resources field. If a Function requested extra resources that - // did not exist, Crossplane sets the map key to an empty Resources message to - // indicate that it attempted to satisfy the request. - ExtraResources map[string]*Resources `protobuf:"bytes,6,rep,name=extra_resources,json=extraResources,proto3" json:"extra_resources,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *RunFunctionRequest) Reset() { - *x = RunFunctionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RunFunctionRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RunFunctionRequest) ProtoMessage() {} - -func (x *RunFunctionRequest) ProtoReflect() protoreflect.Message { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RunFunctionRequest.ProtoReflect.Descriptor instead. 
-func (*RunFunctionRequest) Descriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{0} -} - -func (x *RunFunctionRequest) GetMeta() *RequestMeta { - if x != nil { - return x.Meta - } - return nil -} - -func (x *RunFunctionRequest) GetObserved() *State { - if x != nil { - return x.Observed - } - return nil -} - -func (x *RunFunctionRequest) GetDesired() *State { - if x != nil { - return x.Desired - } - return nil -} - -func (x *RunFunctionRequest) GetInput() *structpb.Struct { - if x != nil { - return x.Input - } - return nil -} - -func (x *RunFunctionRequest) GetContext() *structpb.Struct { - if x != nil { - return x.Context - } - return nil -} - -func (x *RunFunctionRequest) GetExtraResources() map[string]*Resources { - if x != nil { - return x.ExtraResources - } - return nil -} - -// Resources represents the state of several Crossplane resources. -type Resources struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Items []*Resource `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` -} - -func (x *Resources) Reset() { - *x = Resources{} - if protoimpl.UnsafeEnabled { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Resources) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Resources) ProtoMessage() {} - -func (x *Resources) ProtoReflect() protoreflect.Message { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Resources.ProtoReflect.Descriptor instead. 
-func (*Resources) Descriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{1} -} - -func (x *Resources) GetItems() []*Resource { - if x != nil { - return x.Items - } - return nil -} - -// A RunFunctionResponse contains the result of a Composition Function run. -type RunFunctionResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Metadata pertaining to this response. - Meta *ResponseMeta `protobuf:"bytes,1,opt,name=meta,proto3" json:"meta,omitempty"` - // Desired state according to a Function pipeline. Functions may add desired - // state, and may mutate or delete any part of the desired state they are - // concerned with. A Function must pass through any part of the desired state - // that it is not concerned with. - // - // Note that the desired state must be a partial object with only the fields - // that this function (and its predecessors in the pipeline) wants to have - // set in the object. Copying a non-partial observed state to desired is most - // likely not what you want to do. Leaving out fields that had been returned - // as desired before will result in them being deleted from the objects in the - // cluster. - Desired *State `protobuf:"bytes,2,opt,name=desired,proto3" json:"desired,omitempty"` - // Results of the Function run. Results are used for observability purposes. - Results []*Result `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"` - // Optional context to be passed to the next Function in the pipeline as part - // of the RunFunctionRequest. Dropped on the last function in the pipeline. - Context *structpb.Struct `protobuf:"bytes,4,opt,name=context,proto3,oneof" json:"context,omitempty"` - // Requirements that must be satisfied for this Function to run successfully. 
- Requirements *Requirements `protobuf:"bytes,5,opt,name=requirements,proto3" json:"requirements,omitempty"` -} - -func (x *RunFunctionResponse) Reset() { - *x = RunFunctionResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RunFunctionResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RunFunctionResponse) ProtoMessage() {} - -func (x *RunFunctionResponse) ProtoReflect() protoreflect.Message { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RunFunctionResponse.ProtoReflect.Descriptor instead. -func (*RunFunctionResponse) Descriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{2} -} - -func (x *RunFunctionResponse) GetMeta() *ResponseMeta { - if x != nil { - return x.Meta - } - return nil -} - -func (x *RunFunctionResponse) GetDesired() *State { - if x != nil { - return x.Desired - } - return nil -} - -func (x *RunFunctionResponse) GetResults() []*Result { - if x != nil { - return x.Results - } - return nil -} - -func (x *RunFunctionResponse) GetContext() *structpb.Struct { - if x != nil { - return x.Context - } - return nil -} - -func (x *RunFunctionResponse) GetRequirements() *Requirements { - if x != nil { - return x.Requirements - } - return nil -} - -// RequestMeta contains metadata pertaining to a RunFunctionRequest. -type RequestMeta struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // An opaque string identifying the content of the request. 
Two identical - // requests should have the same tag. - Tag string `protobuf:"bytes,1,opt,name=tag,proto3" json:"tag,omitempty"` -} - -func (x *RequestMeta) Reset() { - *x = RequestMeta{} - if protoimpl.UnsafeEnabled { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RequestMeta) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RequestMeta) ProtoMessage() {} - -func (x *RequestMeta) ProtoReflect() protoreflect.Message { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RequestMeta.ProtoReflect.Descriptor instead. -func (*RequestMeta) Descriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{3} -} - -func (x *RequestMeta) GetTag() string { - if x != nil { - return x.Tag - } - return "" -} - -// Requirements that must be satisfied for a Function to run successfully. -type Requirements struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Extra resources that this Function requires. - // The map key uniquely identifies the group of resources. 
- ExtraResources map[string]*ResourceSelector `protobuf:"bytes,1,rep,name=extra_resources,json=extraResources,proto3" json:"extra_resources,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *Requirements) Reset() { - *x = Requirements{} - if protoimpl.UnsafeEnabled { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Requirements) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Requirements) ProtoMessage() {} - -func (x *Requirements) ProtoReflect() protoreflect.Message { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Requirements.ProtoReflect.Descriptor instead. -func (*Requirements) Descriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{4} -} - -func (x *Requirements) GetExtraResources() map[string]*ResourceSelector { - if x != nil { - return x.ExtraResources - } - return nil -} - -// ResourceSelector selects a group of resources, either by name or by label. 
-type ResourceSelector struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ApiVersion string `protobuf:"bytes,1,opt,name=api_version,json=apiVersion,proto3" json:"api_version,omitempty"` - Kind string `protobuf:"bytes,2,opt,name=kind,proto3" json:"kind,omitempty"` - // Types that are assignable to Match: - // - // *ResourceSelector_MatchName - // *ResourceSelector_MatchLabels - Match isResourceSelector_Match `protobuf_oneof:"match"` -} - -func (x *ResourceSelector) Reset() { - *x = ResourceSelector{} - if protoimpl.UnsafeEnabled { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ResourceSelector) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ResourceSelector) ProtoMessage() {} - -func (x *ResourceSelector) ProtoReflect() protoreflect.Message { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ResourceSelector.ProtoReflect.Descriptor instead. 
-func (*ResourceSelector) Descriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{5} -} - -func (x *ResourceSelector) GetApiVersion() string { - if x != nil { - return x.ApiVersion - } - return "" -} - -func (x *ResourceSelector) GetKind() string { - if x != nil { - return x.Kind - } - return "" -} - -func (m *ResourceSelector) GetMatch() isResourceSelector_Match { - if m != nil { - return m.Match - } - return nil -} - -func (x *ResourceSelector) GetMatchName() string { - if x, ok := x.GetMatch().(*ResourceSelector_MatchName); ok { - return x.MatchName - } - return "" -} - -func (x *ResourceSelector) GetMatchLabels() *MatchLabels { - if x, ok := x.GetMatch().(*ResourceSelector_MatchLabels); ok { - return x.MatchLabels - } - return nil -} - -type isResourceSelector_Match interface { - isResourceSelector_Match() -} - -type ResourceSelector_MatchName struct { - MatchName string `protobuf:"bytes,3,opt,name=match_name,json=matchName,proto3,oneof"` -} - -type ResourceSelector_MatchLabels struct { - MatchLabels *MatchLabels `protobuf:"bytes,4,opt,name=match_labels,json=matchLabels,proto3,oneof"` -} - -func (*ResourceSelector_MatchName) isResourceSelector_Match() {} - -func (*ResourceSelector_MatchLabels) isResourceSelector_Match() {} - -// MatchLabels defines a set of labels to match resources against. 
-type MatchLabels struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Labels map[string]string `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *MatchLabels) Reset() { - *x = MatchLabels{} - if protoimpl.UnsafeEnabled { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MatchLabels) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MatchLabels) ProtoMessage() {} - -func (x *MatchLabels) ProtoReflect() protoreflect.Message { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MatchLabels.ProtoReflect.Descriptor instead. -func (*MatchLabels) Descriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{6} -} - -func (x *MatchLabels) GetLabels() map[string]string { - if x != nil { - return x.Labels - } - return nil -} - -// ResponseMeta contains metadata pertaining to a RunFunctionResponse. -type ResponseMeta struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // An opaque string identifying the content of the request. Must match the - // meta.tag of the corresponding RunFunctionRequest. - Tag string `protobuf:"bytes,1,opt,name=tag,proto3" json:"tag,omitempty"` - // Time-to-live of this response. Deterministic Functions with no side-effects - // (e.g. simple templating Functions) may specify a TTL. Crossplane may choose - // to cache responses until the TTL expires. 
- Ttl *durationpb.Duration `protobuf:"bytes,2,opt,name=ttl,proto3,oneof" json:"ttl,omitempty"` -} - -func (x *ResponseMeta) Reset() { - *x = ResponseMeta{} - if protoimpl.UnsafeEnabled { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ResponseMeta) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ResponseMeta) ProtoMessage() {} - -func (x *ResponseMeta) ProtoReflect() protoreflect.Message { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ResponseMeta.ProtoReflect.Descriptor instead. -func (*ResponseMeta) Descriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{7} -} - -func (x *ResponseMeta) GetTag() string { - if x != nil { - return x.Tag - } - return "" -} - -func (x *ResponseMeta) GetTtl() *durationpb.Duration { - if x != nil { - return x.Ttl - } - return nil -} - -// State of the composite resource (XR) and any composed resources. -type State struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The state of the composite resource (XR). - Composite *Resource `protobuf:"bytes,1,opt,name=composite,proto3" json:"composite,omitempty"` - // The state of any composed resources. 
- Resources map[string]*Resource `protobuf:"bytes,2,rep,name=resources,proto3" json:"resources,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *State) Reset() { - *x = State{} - if protoimpl.UnsafeEnabled { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *State) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*State) ProtoMessage() {} - -func (x *State) ProtoReflect() protoreflect.Message { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use State.ProtoReflect.Descriptor instead. -func (*State) Descriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{8} -} - -func (x *State) GetComposite() *Resource { - if x != nil { - return x.Composite - } - return nil -} - -func (x *State) GetResources() map[string]*Resource { - if x != nil { - return x.Resources - } - return nil -} - -// A Resource represents the state of a composite or composed resource. -type Resource struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The JSON representation of the resource. - // - // - Crossplane will set this field in a RunFunctionRequest to the entire - // observed state of a resource - including its metadata, spec, and status. - // - // - A Function should set this field in a RunFunctionRequest to communicate - // the desired state of a composite or composed resource. - // - // - A Function may only specify the desired status of a composite resource - - // not its metadata or spec. 
A Function should not return desired metadata - // or spec for a composite resource. This will be ignored. - // - // - A Function may not specify the desired status of a composed resource - - // only its metadata and spec. A Function should not return desired status - // for a composed resource. This will be ignored. - Resource *structpb.Struct `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` - // The resource's connection details. - // - // - Crossplane will set this field in a RunFunctionRequest to communicate the - // the observed connection details of a composite or composed resource. - // - // - A Function should set this field in a RunFunctionResponse to indicate the - // desired connection details of the composite resource. - // - // - A Function should not set this field in a RunFunctionResponse to indicate - // the desired connection details of a composed resource. This will be - // ignored. - ConnectionDetails map[string][]byte `protobuf:"bytes,2,rep,name=connection_details,json=connectionDetails,proto3" json:"connection_details,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // Ready indicates whether the resource should be considered ready. - // - // * Crossplane will never set this field in a RunFunctionRequest. - // - // - A Function should set this field to READY_TRUE in a RunFunctionResponse - // to indicate that a desired composed resource is ready. - // - // - A Function should not set this field in a RunFunctionResponse to indicate - // that the desired composite resource is ready. This will be ignored. 
- Ready Ready `protobuf:"varint,3,opt,name=ready,proto3,enum=apiextensions.fn.proto.v1beta1.Ready" json:"ready,omitempty"` -} - -func (x *Resource) Reset() { - *x = Resource{} - if protoimpl.UnsafeEnabled { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Resource) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Resource) ProtoMessage() {} - -func (x *Resource) ProtoReflect() protoreflect.Message { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Resource.ProtoReflect.Descriptor instead. -func (*Resource) Descriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{9} -} - -func (x *Resource) GetResource() *structpb.Struct { - if x != nil { - return x.Resource - } - return nil -} - -func (x *Resource) GetConnectionDetails() map[string][]byte { - if x != nil { - return x.ConnectionDetails - } - return nil -} - -func (x *Resource) GetReady() Ready { - if x != nil { - return x.Ready - } - return Ready_READY_UNSPECIFIED -} - -// A Result of running a Function. -type Result struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Severity of this result. - Severity Severity `protobuf:"varint,1,opt,name=severity,proto3,enum=apiextensions.fn.proto.v1beta1.Severity" json:"severity,omitempty"` - // Human-readable details about the result. 
- Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` -} - -func (x *Result) Reset() { - *x = Result{} - if protoimpl.UnsafeEnabled { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Result) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Result) ProtoMessage() {} - -func (x *Result) ProtoReflect() protoreflect.Message { - mi := &file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Result.ProtoReflect.Descriptor instead. -func (*Result) Descriptor() ([]byte, []int) { - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP(), []int{10} -} - -func (x *Result) GetSeverity() Severity { - if x != nil { - return x.Severity - } - return Severity_SEVERITY_UNSPECIFIED -} - -func (x *Result) GetMessage() string { - if x != nil { - return x.Message - } - return "" -} - -var File_apiextensions_fn_proto_v1beta1_run_function_proto protoreflect.FileDescriptor - -var file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDesc = []byte{ - 0x0a, 0x31, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, - 0x66, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, - 0x2f, 0x72, 0x75, 0x6e, 0x5f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x1e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, - 0x74, 0x61, 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 
0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x22, 0xba, 0x04, 0x0a, 0x12, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, - 0x65, 0x74, 0x61, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x41, 0x0a, 0x08, 0x6f, 0x62, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x61, 0x70, - 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x52, 0x08, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x12, 0x3f, 0x0a, 0x07, - 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, - 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x52, 0x07, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x12, 0x32, 0x0a, - 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, - 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x88, 0x01, - 0x01, 0x12, 0x36, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 
0x65, 0x78, 0x74, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x01, 0x52, 0x07, 0x63, - 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x88, 0x01, 0x01, 0x12, 0x6f, 0x0a, 0x0f, 0x65, 0x78, 0x74, - 0x72, 0x61, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, - 0x74, 0x61, 0x31, 0x2e, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x72, - 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x1a, 0x6c, 0x0a, 0x13, 0x45, 0x78, - 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, - 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x69, 0x6e, 0x70, - 0x75, 0x74, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x4b, - 0x0a, 0x09, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x3e, 0x0a, 0x05, 0x69, - 0x74, 0x65, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x61, 0x70, 
0x69, - 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x22, 0xf0, 0x02, 0x0a, 0x13, - 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2c, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, - 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x52, - 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x3f, 0x0a, 0x07, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x07, 0x64, - 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, - 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x36, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, - 0x65, 0x78, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, - 0x63, 0x74, 0x48, 0x00, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x88, 0x01, 0x01, - 0x12, 0x50, 0x0a, 0x0c, 0x72, 
0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 0x52, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x1f, - 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x10, 0x0a, - 0x03, 0x74, 0x61, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x61, 0x67, 0x22, - 0xee, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, - 0x12, 0x69, 0x0a, 0x0f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x61, 0x70, 0x69, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x69, - 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x65, 0x78, 0x74, - 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x1a, 0x73, 0x0a, 0x13, 0x45, - 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x46, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x2e, 0x76, 0x31, 0x62, - 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x6c, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x22, 0xc3, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x6c, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x1f, 0x0a, 0x0a, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, - 0x52, 0x09, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x50, 0x0a, 0x0c, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2b, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, - 0x61, 0x31, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x48, 0x00, - 0x52, 0x0b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x42, 0x07, 0x0a, - 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x99, 0x01, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x4f, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4c, 0x61, 0x62, - 0x65, 0x6c, 0x73, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x06, 
0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x22, 0x5a, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, - 0x74, 0x61, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x61, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x74, 0x61, 0x67, 0x12, 0x30, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x03, - 0x74, 0x74, 0x6c, 0x88, 0x01, 0x01, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x74, 0x74, 0x6c, 0x22, 0x8b, - 0x02, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x46, 0x0a, 0x09, 0x63, 0x6f, 0x6d, 0x70, - 0x6f, 0x73, 0x69, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x61, 0x70, - 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x65, - 0x12, 0x52, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, - 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x73, 0x1a, 0x66, 0x0a, 
0x0e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3e, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb2, 0x02, 0x0a, - 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x33, 0x0a, 0x08, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, - 0x72, 0x75, 0x63, 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x6e, - 0x0a, 0x12, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, - 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x61, 0x70, 0x69, - 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, - 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x3b, - 0x0a, 0x05, 0x72, 0x65, 0x61, 0x64, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, - 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 
0x31, 0x2e, 0x52, - 0x65, 0x61, 0x64, 0x79, 0x52, 0x05, 0x72, 0x65, 0x61, 0x64, 0x79, 0x1a, 0x44, 0x0a, 0x16, 0x43, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x22, 0x68, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x44, 0x0a, 0x08, 0x73, - 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, - 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, - 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x52, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, - 0x79, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2a, 0x3f, 0x0a, 0x05, 0x52, - 0x65, 0x61, 0x64, 0x79, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x55, 0x4e, - 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x52, - 0x45, 0x41, 0x44, 0x59, 0x5f, 0x54, 0x52, 0x55, 0x45, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x52, - 0x45, 0x41, 0x44, 0x59, 0x5f, 0x46, 0x41, 0x4c, 0x53, 0x45, 0x10, 0x02, 0x2a, 0x63, 0x0a, 0x08, - 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x45, 0x56, 0x45, - 0x52, 0x49, 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, - 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x46, - 0x41, 0x54, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, - 0x54, 0x59, 0x5f, 
0x57, 0x41, 0x52, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, - 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, - 0x03, 0x32, 0x91, 0x01, 0x0a, 0x15, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x75, - 0x6e, 0x6e, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x78, 0x0a, 0x0b, 0x52, - 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x2e, 0x61, 0x70, 0x69, - 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x75, 0x6e, 0x46, - 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, - 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, - 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, - 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x46, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x63, - 0x72, 0x6f, 0x73, 0x73, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, - 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x66, 0x6e, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescOnce sync.Once - file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescData = file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDesc -) - -func file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescGZIP() []byte { - file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescOnce.Do(func() { - 
file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescData = protoimpl.X.CompressGZIP(file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescData) - }) - return file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDescData -} - -var file_apiextensions_fn_proto_v1beta1_run_function_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes = make([]protoimpl.MessageInfo, 16) -var file_apiextensions_fn_proto_v1beta1_run_function_proto_goTypes = []interface{}{ - (Ready)(0), // 0: apiextensions.fn.proto.v1beta1.Ready - (Severity)(0), // 1: apiextensions.fn.proto.v1beta1.Severity - (*RunFunctionRequest)(nil), // 2: apiextensions.fn.proto.v1beta1.RunFunctionRequest - (*Resources)(nil), // 3: apiextensions.fn.proto.v1beta1.Resources - (*RunFunctionResponse)(nil), // 4: apiextensions.fn.proto.v1beta1.RunFunctionResponse - (*RequestMeta)(nil), // 5: apiextensions.fn.proto.v1beta1.RequestMeta - (*Requirements)(nil), // 6: apiextensions.fn.proto.v1beta1.Requirements - (*ResourceSelector)(nil), // 7: apiextensions.fn.proto.v1beta1.ResourceSelector - (*MatchLabels)(nil), // 8: apiextensions.fn.proto.v1beta1.MatchLabels - (*ResponseMeta)(nil), // 9: apiextensions.fn.proto.v1beta1.ResponseMeta - (*State)(nil), // 10: apiextensions.fn.proto.v1beta1.State - (*Resource)(nil), // 11: apiextensions.fn.proto.v1beta1.Resource - (*Result)(nil), // 12: apiextensions.fn.proto.v1beta1.Result - nil, // 13: apiextensions.fn.proto.v1beta1.RunFunctionRequest.ExtraResourcesEntry - nil, // 14: apiextensions.fn.proto.v1beta1.Requirements.ExtraResourcesEntry - nil, // 15: apiextensions.fn.proto.v1beta1.MatchLabels.LabelsEntry - nil, // 16: apiextensions.fn.proto.v1beta1.State.ResourcesEntry - nil, // 17: apiextensions.fn.proto.v1beta1.Resource.ConnectionDetailsEntry - (*structpb.Struct)(nil), // 18: google.protobuf.Struct - (*durationpb.Duration)(nil), // 19: google.protobuf.Duration -} -var 
file_apiextensions_fn_proto_v1beta1_run_function_proto_depIdxs = []int32{ - 5, // 0: apiextensions.fn.proto.v1beta1.RunFunctionRequest.meta:type_name -> apiextensions.fn.proto.v1beta1.RequestMeta - 10, // 1: apiextensions.fn.proto.v1beta1.RunFunctionRequest.observed:type_name -> apiextensions.fn.proto.v1beta1.State - 10, // 2: apiextensions.fn.proto.v1beta1.RunFunctionRequest.desired:type_name -> apiextensions.fn.proto.v1beta1.State - 18, // 3: apiextensions.fn.proto.v1beta1.RunFunctionRequest.input:type_name -> google.protobuf.Struct - 18, // 4: apiextensions.fn.proto.v1beta1.RunFunctionRequest.context:type_name -> google.protobuf.Struct - 13, // 5: apiextensions.fn.proto.v1beta1.RunFunctionRequest.extra_resources:type_name -> apiextensions.fn.proto.v1beta1.RunFunctionRequest.ExtraResourcesEntry - 11, // 6: apiextensions.fn.proto.v1beta1.Resources.items:type_name -> apiextensions.fn.proto.v1beta1.Resource - 9, // 7: apiextensions.fn.proto.v1beta1.RunFunctionResponse.meta:type_name -> apiextensions.fn.proto.v1beta1.ResponseMeta - 10, // 8: apiextensions.fn.proto.v1beta1.RunFunctionResponse.desired:type_name -> apiextensions.fn.proto.v1beta1.State - 12, // 9: apiextensions.fn.proto.v1beta1.RunFunctionResponse.results:type_name -> apiextensions.fn.proto.v1beta1.Result - 18, // 10: apiextensions.fn.proto.v1beta1.RunFunctionResponse.context:type_name -> google.protobuf.Struct - 6, // 11: apiextensions.fn.proto.v1beta1.RunFunctionResponse.requirements:type_name -> apiextensions.fn.proto.v1beta1.Requirements - 14, // 12: apiextensions.fn.proto.v1beta1.Requirements.extra_resources:type_name -> apiextensions.fn.proto.v1beta1.Requirements.ExtraResourcesEntry - 8, // 13: apiextensions.fn.proto.v1beta1.ResourceSelector.match_labels:type_name -> apiextensions.fn.proto.v1beta1.MatchLabels - 15, // 14: apiextensions.fn.proto.v1beta1.MatchLabels.labels:type_name -> apiextensions.fn.proto.v1beta1.MatchLabels.LabelsEntry - 19, // 15: 
apiextensions.fn.proto.v1beta1.ResponseMeta.ttl:type_name -> google.protobuf.Duration - 11, // 16: apiextensions.fn.proto.v1beta1.State.composite:type_name -> apiextensions.fn.proto.v1beta1.Resource - 16, // 17: apiextensions.fn.proto.v1beta1.State.resources:type_name -> apiextensions.fn.proto.v1beta1.State.ResourcesEntry - 18, // 18: apiextensions.fn.proto.v1beta1.Resource.resource:type_name -> google.protobuf.Struct - 17, // 19: apiextensions.fn.proto.v1beta1.Resource.connection_details:type_name -> apiextensions.fn.proto.v1beta1.Resource.ConnectionDetailsEntry - 0, // 20: apiextensions.fn.proto.v1beta1.Resource.ready:type_name -> apiextensions.fn.proto.v1beta1.Ready - 1, // 21: apiextensions.fn.proto.v1beta1.Result.severity:type_name -> apiextensions.fn.proto.v1beta1.Severity - 3, // 22: apiextensions.fn.proto.v1beta1.RunFunctionRequest.ExtraResourcesEntry.value:type_name -> apiextensions.fn.proto.v1beta1.Resources - 7, // 23: apiextensions.fn.proto.v1beta1.Requirements.ExtraResourcesEntry.value:type_name -> apiextensions.fn.proto.v1beta1.ResourceSelector - 11, // 24: apiextensions.fn.proto.v1beta1.State.ResourcesEntry.value:type_name -> apiextensions.fn.proto.v1beta1.Resource - 2, // 25: apiextensions.fn.proto.v1beta1.FunctionRunnerService.RunFunction:input_type -> apiextensions.fn.proto.v1beta1.RunFunctionRequest - 4, // 26: apiextensions.fn.proto.v1beta1.FunctionRunnerService.RunFunction:output_type -> apiextensions.fn.proto.v1beta1.RunFunctionResponse - 26, // [26:27] is the sub-list for method output_type - 25, // [25:26] is the sub-list for method input_type - 25, // [25:25] is the sub-list for extension type_name - 25, // [25:25] is the sub-list for extension extendee - 0, // [0:25] is the sub-list for field type_name -} - -func init() { file_apiextensions_fn_proto_v1beta1_run_function_proto_init() } -func file_apiextensions_fn_proto_v1beta1_run_function_proto_init() { - if File_apiextensions_fn_proto_v1beta1_run_function_proto != nil { - return - } - if 
!protoimpl.UnsafeEnabled { - file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RunFunctionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Resources); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RunFunctionResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RequestMeta); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Requirements); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResourceSelector); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MatchLabels); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return 
&v.unknownFields - default: - return nil - } - } - file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResponseMeta); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*State); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Resource); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Result); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[0].OneofWrappers = []interface{}{} - file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[2].OneofWrappers = []interface{}{} - file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[5].OneofWrappers = []interface{}{ - (*ResourceSelector_MatchName)(nil), - (*ResourceSelector_MatchLabels)(nil), - } - file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes[7].OneofWrappers = []interface{}{} - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDesc, - NumEnums: 2, - NumMessages: 16, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: 
file_apiextensions_fn_proto_v1beta1_run_function_proto_goTypes, - DependencyIndexes: file_apiextensions_fn_proto_v1beta1_run_function_proto_depIdxs, - EnumInfos: file_apiextensions_fn_proto_v1beta1_run_function_proto_enumTypes, - MessageInfos: file_apiextensions_fn_proto_v1beta1_run_function_proto_msgTypes, - }.Build() - File_apiextensions_fn_proto_v1beta1_run_function_proto = out.File - file_apiextensions_fn_proto_v1beta1_run_function_proto_rawDesc = nil - file_apiextensions_fn_proto_v1beta1_run_function_proto_goTypes = nil - file_apiextensions_fn_proto_v1beta1_run_function_proto_depIdxs = nil -} diff --git a/apis/apiextensions/fn/proto/v1beta1/zz_generated_run_function.pb.go b/apis/apiextensions/fn/proto/v1beta1/zz_generated_run_function.pb.go new file mode 100644 index 000000000..3fe1dd60d --- /dev/null +++ b/apis/apiextensions/fn/proto/v1beta1/zz_generated_run_function.pb.go @@ -0,0 +1,1854 @@ +// +//Copyright 2022 The Crossplane Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc (unknown) +// source: apiextensions/fn/proto/v1beta1/zz_generated_run_function.proto + +// Generated from apiextensions/fn/proto/v1/run_function.proto by ../hack/duplicate_proto_type.sh. DO NOT EDIT. 
+ +package v1beta1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Ready indicates whether a composed resource should be considered ready. +type Ready int32 + +const ( + Ready_READY_UNSPECIFIED Ready = 0 + // True means the composed resource has been observed to be ready. + Ready_READY_TRUE Ready = 1 + // False means the composed resource has not been observed to be ready. + Ready_READY_FALSE Ready = 2 +) + +// Enum value maps for Ready. +var ( + Ready_name = map[int32]string{ + 0: "READY_UNSPECIFIED", + 1: "READY_TRUE", + 2: "READY_FALSE", + } + Ready_value = map[string]int32{ + "READY_UNSPECIFIED": 0, + "READY_TRUE": 1, + "READY_FALSE": 2, + } +) + +func (x Ready) Enum() *Ready { + p := new(Ready) + *p = x + return p +} + +func (x Ready) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Ready) Descriptor() protoreflect.EnumDescriptor { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_enumTypes[0].Descriptor() +} + +func (Ready) Type() protoreflect.EnumType { + return &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_enumTypes[0] +} + +func (x Ready) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Ready.Descriptor instead. +func (Ready) EnumDescriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{0} +} + +// Severity of Function results. 
+type Severity int32 + +const ( + Severity_SEVERITY_UNSPECIFIED Severity = 0 + // Fatal results are fatal; subsequent Composition Functions may run, but + // the Composition Function pipeline run will be considered a failure and + // the first fatal result will be returned as an error. + Severity_SEVERITY_FATAL Severity = 1 + // Warning results are non-fatal; the entire Composition will run to + // completion but warning events and debug logs associated with the + // composite resource will be emitted. + Severity_SEVERITY_WARNING Severity = 2 + // Normal results are emitted as normal events and debug logs associated + // with the composite resource. + Severity_SEVERITY_NORMAL Severity = 3 +) + +// Enum value maps for Severity. +var ( + Severity_name = map[int32]string{ + 0: "SEVERITY_UNSPECIFIED", + 1: "SEVERITY_FATAL", + 2: "SEVERITY_WARNING", + 3: "SEVERITY_NORMAL", + } + Severity_value = map[string]int32{ + "SEVERITY_UNSPECIFIED": 0, + "SEVERITY_FATAL": 1, + "SEVERITY_WARNING": 2, + "SEVERITY_NORMAL": 3, + } +) + +func (x Severity) Enum() *Severity { + p := new(Severity) + *p = x + return p +} + +func (x Severity) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Severity) Descriptor() protoreflect.EnumDescriptor { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_enumTypes[1].Descriptor() +} + +func (Severity) Type() protoreflect.EnumType { + return &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_enumTypes[1] +} + +func (x Severity) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Severity.Descriptor instead. +func (Severity) EnumDescriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{1} +} + +// Target of Function results and conditions. 
+type Target int32 + +const ( + // If the target is unspecified, the result targets the composite resource. + Target_TARGET_UNSPECIFIED Target = 0 + // Target the composite resource. Results that target the composite resource + // should include detailed, advanced information. + Target_TARGET_COMPOSITE Target = 1 + // Target the composite and the claim. Results that target the composite and + // the claim should include only end-user friendly information. + Target_TARGET_COMPOSITE_AND_CLAIM Target = 2 +) + +// Enum value maps for Target. +var ( + Target_name = map[int32]string{ + 0: "TARGET_UNSPECIFIED", + 1: "TARGET_COMPOSITE", + 2: "TARGET_COMPOSITE_AND_CLAIM", + } + Target_value = map[string]int32{ + "TARGET_UNSPECIFIED": 0, + "TARGET_COMPOSITE": 1, + "TARGET_COMPOSITE_AND_CLAIM": 2, + } +) + +func (x Target) Enum() *Target { + p := new(Target) + *p = x + return p +} + +func (x Target) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Target) Descriptor() protoreflect.EnumDescriptor { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_enumTypes[2].Descriptor() +} + +func (Target) Type() protoreflect.EnumType { + return &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_enumTypes[2] +} + +func (x Target) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Target.Descriptor instead. +func (Target) EnumDescriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{2} +} + +type Status int32 + +const ( + Status_STATUS_CONDITION_UNSPECIFIED Status = 0 + Status_STATUS_CONDITION_UNKNOWN Status = 1 + Status_STATUS_CONDITION_TRUE Status = 2 + Status_STATUS_CONDITION_FALSE Status = 3 +) + +// Enum value maps for Status. 
+var ( + Status_name = map[int32]string{ + 0: "STATUS_CONDITION_UNSPECIFIED", + 1: "STATUS_CONDITION_UNKNOWN", + 2: "STATUS_CONDITION_TRUE", + 3: "STATUS_CONDITION_FALSE", + } + Status_value = map[string]int32{ + "STATUS_CONDITION_UNSPECIFIED": 0, + "STATUS_CONDITION_UNKNOWN": 1, + "STATUS_CONDITION_TRUE": 2, + "STATUS_CONDITION_FALSE": 3, + } +) + +func (x Status) Enum() *Status { + p := new(Status) + *p = x + return p +} + +func (x Status) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Status) Descriptor() protoreflect.EnumDescriptor { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_enumTypes[3].Descriptor() +} + +func (Status) Type() protoreflect.EnumType { + return &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_enumTypes[3] +} + +func (x Status) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Status.Descriptor instead. +func (Status) EnumDescriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{3} +} + +// A RunFunctionRequest requests that the Composition Function be run. +type RunFunctionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Metadata pertaining to this request. + Meta *RequestMeta `protobuf:"bytes,1,opt,name=meta,proto3" json:"meta,omitempty"` + // The observed state prior to invocation of a Function pipeline. State passed + // to each Function is fresh as of the time the pipeline was invoked, not as + // of the time each Function was invoked. + Observed *State `protobuf:"bytes,2,opt,name=observed,proto3" json:"observed,omitempty"` + // Desired state according to a Function pipeline. The state passed to a + // particular Function may have been accumulated by previous Functions in the + // pipeline. 
+ // + // Note that the desired state must be a partial object with only the fields + // that this function (and its predecessors in the pipeline) wants to have + // set in the object. Copying a non-partial observed state to desired is most + // likely not what you want to do. Leaving out fields that had been returned + // as desired before will result in them being deleted from the objects in the + // cluster. + Desired *State `protobuf:"bytes,3,opt,name=desired,proto3" json:"desired,omitempty"` + // Optional input specific to this Function invocation. A JSON representation + // of the 'input' block of the relevant entry in a Composition's pipeline. + Input *structpb.Struct `protobuf:"bytes,4,opt,name=input,proto3,oneof" json:"input,omitempty"` + // Optional context. Crossplane may pass arbitary contextual information to a + // Function. A Function may also return context in its RunFunctionResponse, + // and that context will be passed to subsequent Functions. Crossplane + // discards all context returned by the last Function in the pipeline. + Context *structpb.Struct `protobuf:"bytes,5,opt,name=context,proto3,oneof" json:"context,omitempty"` + // Optional extra resources that the Function required. + // Note that extra resources is a map to Resources, plural. + // The map key corresponds to the key in a RunFunctionResponse's + // extra_resources field. If a Function requested extra resources that + // did not exist, Crossplane sets the map key to an empty Resources message to + // indicate that it attempted to satisfy the request. + ExtraResources map[string]*Resources `protobuf:"bytes,6,rep,name=extra_resources,json=extraResources,proto3" json:"extra_resources,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Optional credentials that this Function may use to communicate with an + // external system. 
+ Credentials map[string]*Credentials `protobuf:"bytes,7,rep,name=credentials,proto3" json:"credentials,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *RunFunctionRequest) Reset() { + *x = RunFunctionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RunFunctionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RunFunctionRequest) ProtoMessage() {} + +func (x *RunFunctionRequest) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RunFunctionRequest.ProtoReflect.Descriptor instead. 
+func (*RunFunctionRequest) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{0} +} + +func (x *RunFunctionRequest) GetMeta() *RequestMeta { + if x != nil { + return x.Meta + } + return nil +} + +func (x *RunFunctionRequest) GetObserved() *State { + if x != nil { + return x.Observed + } + return nil +} + +func (x *RunFunctionRequest) GetDesired() *State { + if x != nil { + return x.Desired + } + return nil +} + +func (x *RunFunctionRequest) GetInput() *structpb.Struct { + if x != nil { + return x.Input + } + return nil +} + +func (x *RunFunctionRequest) GetContext() *structpb.Struct { + if x != nil { + return x.Context + } + return nil +} + +func (x *RunFunctionRequest) GetExtraResources() map[string]*Resources { + if x != nil { + return x.ExtraResources + } + return nil +} + +func (x *RunFunctionRequest) GetCredentials() map[string]*Credentials { + if x != nil { + return x.Credentials + } + return nil +} + +// Credentials that a Function may use to communicate with an external system. +type Credentials struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Source of the credentials. 
+ // + // Types that are assignable to Source: + // + // *Credentials_CredentialData + Source isCredentials_Source `protobuf_oneof:"source"` +} + +func (x *Credentials) Reset() { + *x = Credentials{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Credentials) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Credentials) ProtoMessage() {} + +func (x *Credentials) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Credentials.ProtoReflect.Descriptor instead. +func (*Credentials) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{1} +} + +func (m *Credentials) GetSource() isCredentials_Source { + if m != nil { + return m.Source + } + return nil +} + +func (x *Credentials) GetCredentialData() *CredentialData { + if x, ok := x.GetSource().(*Credentials_CredentialData); ok { + return x.CredentialData + } + return nil +} + +type isCredentials_Source interface { + isCredentials_Source() +} + +type Credentials_CredentialData struct { + // Credential data loaded by Crossplane, for example from a Secret. + CredentialData *CredentialData `protobuf:"bytes,1,opt,name=credential_data,json=credentialData,proto3,oneof"` +} + +func (*Credentials_CredentialData) isCredentials_Source() {} + +// CredentialData loaded by Crossplane, for example from a Secret. 
+type CredentialData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Data map[string][]byte `protobuf:"bytes,1,rep,name=data,proto3" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *CredentialData) Reset() { + *x = CredentialData{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CredentialData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CredentialData) ProtoMessage() {} + +func (x *CredentialData) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CredentialData.ProtoReflect.Descriptor instead. +func (*CredentialData) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{2} +} + +func (x *CredentialData) GetData() map[string][]byte { + if x != nil { + return x.Data + } + return nil +} + +// Resources represents the state of several Crossplane resources. 
+type Resources struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Items []*Resource `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` +} + +func (x *Resources) Reset() { + *x = Resources{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Resources) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Resources) ProtoMessage() {} + +func (x *Resources) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Resources.ProtoReflect.Descriptor instead. +func (*Resources) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{3} +} + +func (x *Resources) GetItems() []*Resource { + if x != nil { + return x.Items + } + return nil +} + +// A RunFunctionResponse contains the result of a Composition Function run. +type RunFunctionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Metadata pertaining to this response. + Meta *ResponseMeta `protobuf:"bytes,1,opt,name=meta,proto3" json:"meta,omitempty"` + // Desired state according to a Function pipeline. Functions may add desired + // state, and may mutate or delete any part of the desired state they are + // concerned with. A Function must pass through any part of the desired state + // that it is not concerned with. 
+ // + // Note that the desired state must be a partial object with only the fields + // that this function (and its predecessors in the pipeline) wants to have + // set in the object. Copying a non-partial observed state to desired is most + // likely not what you want to do. Leaving out fields that had been returned + // as desired before will result in them being deleted from the objects in the + // cluster. + Desired *State `protobuf:"bytes,2,opt,name=desired,proto3" json:"desired,omitempty"` + // Results of the Function run. Results are used for observability purposes. + Results []*Result `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"` + // Optional context to be passed to the next Function in the pipeline as part + // of the RunFunctionRequest. Dropped on the last function in the pipeline. + Context *structpb.Struct `protobuf:"bytes,4,opt,name=context,proto3,oneof" json:"context,omitempty"` + // Requirements that must be satisfied for this Function to run successfully. + Requirements *Requirements `protobuf:"bytes,5,opt,name=requirements,proto3" json:"requirements,omitempty"` + // Status conditions to be applied to the composite resource. Conditions may also + // optionally be applied to the composite resource's associated claim. 
+ Conditions []*Condition `protobuf:"bytes,6,rep,name=conditions,proto3" json:"conditions,omitempty"` +} + +func (x *RunFunctionResponse) Reset() { + *x = RunFunctionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RunFunctionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RunFunctionResponse) ProtoMessage() {} + +func (x *RunFunctionResponse) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RunFunctionResponse.ProtoReflect.Descriptor instead. +func (*RunFunctionResponse) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{4} +} + +func (x *RunFunctionResponse) GetMeta() *ResponseMeta { + if x != nil { + return x.Meta + } + return nil +} + +func (x *RunFunctionResponse) GetDesired() *State { + if x != nil { + return x.Desired + } + return nil +} + +func (x *RunFunctionResponse) GetResults() []*Result { + if x != nil { + return x.Results + } + return nil +} + +func (x *RunFunctionResponse) GetContext() *structpb.Struct { + if x != nil { + return x.Context + } + return nil +} + +func (x *RunFunctionResponse) GetRequirements() *Requirements { + if x != nil { + return x.Requirements + } + return nil +} + +func (x *RunFunctionResponse) GetConditions() []*Condition { + if x != nil { + return x.Conditions + } + return nil +} + +// RequestMeta contains metadata pertaining to a RunFunctionRequest. 
+type RequestMeta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An opaque string identifying the content of the request. Two identical + // requests should have the same tag. + Tag string `protobuf:"bytes,1,opt,name=tag,proto3" json:"tag,omitempty"` +} + +func (x *RequestMeta) Reset() { + *x = RequestMeta{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestMeta) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestMeta) ProtoMessage() {} + +func (x *RequestMeta) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestMeta.ProtoReflect.Descriptor instead. +func (*RequestMeta) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{5} +} + +func (x *RequestMeta) GetTag() string { + if x != nil { + return x.Tag + } + return "" +} + +// Requirements that must be satisfied for a Function to run successfully. +type Requirements struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Extra resources that this Function requires. + // The map key uniquely identifies the group of resources. 
+ ExtraResources map[string]*ResourceSelector `protobuf:"bytes,1,rep,name=extra_resources,json=extraResources,proto3" json:"extra_resources,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Requirements) Reset() { + *x = Requirements{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Requirements) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Requirements) ProtoMessage() {} + +func (x *Requirements) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Requirements.ProtoReflect.Descriptor instead. +func (*Requirements) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{6} +} + +func (x *Requirements) GetExtraResources() map[string]*ResourceSelector { + if x != nil { + return x.ExtraResources + } + return nil +} + +// ResourceSelector selects a group of resources, either by name or by label. +type ResourceSelector struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // API version of resources to select. + ApiVersion string `protobuf:"bytes,1,opt,name=api_version,json=apiVersion,proto3" json:"api_version,omitempty"` + // Kind of resources to select. + Kind string `protobuf:"bytes,2,opt,name=kind,proto3" json:"kind,omitempty"` + // Resources to match. 
+ // + // Types that are assignable to Match: + // + // *ResourceSelector_MatchName + // *ResourceSelector_MatchLabels + Match isResourceSelector_Match `protobuf_oneof:"match"` +} + +func (x *ResourceSelector) Reset() { + *x = ResourceSelector{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceSelector) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceSelector) ProtoMessage() {} + +func (x *ResourceSelector) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceSelector.ProtoReflect.Descriptor instead. 
+func (*ResourceSelector) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{7} +} + +func (x *ResourceSelector) GetApiVersion() string { + if x != nil { + return x.ApiVersion + } + return "" +} + +func (x *ResourceSelector) GetKind() string { + if x != nil { + return x.Kind + } + return "" +} + +func (m *ResourceSelector) GetMatch() isResourceSelector_Match { + if m != nil { + return m.Match + } + return nil +} + +func (x *ResourceSelector) GetMatchName() string { + if x, ok := x.GetMatch().(*ResourceSelector_MatchName); ok { + return x.MatchName + } + return "" +} + +func (x *ResourceSelector) GetMatchLabels() *MatchLabels { + if x, ok := x.GetMatch().(*ResourceSelector_MatchLabels); ok { + return x.MatchLabels + } + return nil +} + +type isResourceSelector_Match interface { + isResourceSelector_Match() +} + +type ResourceSelector_MatchName struct { + // Match the resource with this name. + MatchName string `protobuf:"bytes,3,opt,name=match_name,json=matchName,proto3,oneof"` +} + +type ResourceSelector_MatchLabels struct { + // Match all resources with these labels. + MatchLabels *MatchLabels `protobuf:"bytes,4,opt,name=match_labels,json=matchLabels,proto3,oneof"` +} + +func (*ResourceSelector_MatchName) isResourceSelector_Match() {} + +func (*ResourceSelector_MatchLabels) isResourceSelector_Match() {} + +// MatchLabels defines a set of labels to match resources against. 
+type MatchLabels struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Labels map[string]string `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *MatchLabels) Reset() { + *x = MatchLabels{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MatchLabels) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MatchLabels) ProtoMessage() {} + +func (x *MatchLabels) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MatchLabels.ProtoReflect.Descriptor instead. +func (*MatchLabels) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{8} +} + +func (x *MatchLabels) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +// ResponseMeta contains metadata pertaining to a RunFunctionResponse. +type ResponseMeta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An opaque string identifying the content of the request. Must match the + // meta.tag of the corresponding RunFunctionRequest. + Tag string `protobuf:"bytes,1,opt,name=tag,proto3" json:"tag,omitempty"` + // Time-to-live of this response. Deterministic Functions with no side-effects + // (e.g. simple templating Functions) may specify a TTL. 
Crossplane may choose + // to cache responses until the TTL expires. + Ttl *durationpb.Duration `protobuf:"bytes,2,opt,name=ttl,proto3,oneof" json:"ttl,omitempty"` +} + +func (x *ResponseMeta) Reset() { + *x = ResponseMeta{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResponseMeta) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResponseMeta) ProtoMessage() {} + +func (x *ResponseMeta) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResponseMeta.ProtoReflect.Descriptor instead. +func (*ResponseMeta) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{9} +} + +func (x *ResponseMeta) GetTag() string { + if x != nil { + return x.Tag + } + return "" +} + +func (x *ResponseMeta) GetTtl() *durationpb.Duration { + if x != nil { + return x.Ttl + } + return nil +} + +// State of the composite resource (XR) and any composed resources. +type State struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The state of the composite resource (XR). + Composite *Resource `protobuf:"bytes,1,opt,name=composite,proto3" json:"composite,omitempty"` + // The state of any composed resources. 
+ Resources map[string]*Resource `protobuf:"bytes,2,rep,name=resources,proto3" json:"resources,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *State) Reset() { + *x = State{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *State) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*State) ProtoMessage() {} + +func (x *State) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use State.ProtoReflect.Descriptor instead. +func (*State) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{10} +} + +func (x *State) GetComposite() *Resource { + if x != nil { + return x.Composite + } + return nil +} + +func (x *State) GetResources() map[string]*Resource { + if x != nil { + return x.Resources + } + return nil +} + +// A Resource represents the state of a composite or composed resource. +type Resource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The JSON representation of the resource. + // + // - Crossplane will set this field in a RunFunctionRequest to the entire + // observed state of a resource - including its metadata, spec, and status. + // + // - A Function should set this field in a RunFunctionRequest to communicate + // the desired state of a composite or composed resource. 
+ // + // - A Function may only specify the desired status of a composite resource - + // not its metadata or spec. A Function should not return desired metadata + // or spec for a composite resource. This will be ignored. + // + // - A Function may not specify the desired status of a composed resource - + // only its metadata and spec. A Function should not return desired status + // for a composed resource. This will be ignored. + Resource *structpb.Struct `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` + // The resource's connection details. + // + // - Crossplane will set this field in a RunFunctionRequest to communicate the + // the observed connection details of a composite or composed resource. + // + // - A Function should set this field in a RunFunctionResponse to indicate the + // desired connection details of the composite resource. + // + // - A Function should not set this field in a RunFunctionResponse to indicate + // the desired connection details of a composed resource. This will be + // ignored. + ConnectionDetails map[string][]byte `protobuf:"bytes,2,rep,name=connection_details,json=connectionDetails,proto3" json:"connection_details,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Ready indicates whether the resource should be considered ready. + // + // * Crossplane will never set this field in a RunFunctionRequest. + // + // - A Function should set this field to READY_TRUE in a RunFunctionResponse + // to indicate that a desired composed resource is ready. + // + // - A Function should not set this field in a RunFunctionResponse to indicate + // that the desired composite resource is ready. This will be ignored. 
+ Ready Ready `protobuf:"varint,3,opt,name=ready,proto3,enum=apiextensions.fn.proto.v1beta1.Ready" json:"ready,omitempty"` +} + +func (x *Resource) Reset() { + *x = Resource{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Resource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Resource) ProtoMessage() {} + +func (x *Resource) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Resource.ProtoReflect.Descriptor instead. +func (*Resource) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{11} +} + +func (x *Resource) GetResource() *structpb.Struct { + if x != nil { + return x.Resource + } + return nil +} + +func (x *Resource) GetConnectionDetails() map[string][]byte { + if x != nil { + return x.ConnectionDetails + } + return nil +} + +func (x *Resource) GetReady() Ready { + if x != nil { + return x.Ready + } + return Ready_READY_UNSPECIFIED +} + +// A Result of running a Function. +type Result struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Severity of this result. + Severity Severity `protobuf:"varint,1,opt,name=severity,proto3,enum=apiextensions.fn.proto.v1beta1.Severity" json:"severity,omitempty"` + // Human-readable details about the result. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // Optional PascalCase, machine-readable reason for this result. 
If omitted, + // the value will be ComposeResources. + Reason *string `protobuf:"bytes,3,opt,name=reason,proto3,oneof" json:"reason,omitempty"` + // The resources this result targets. + Target *Target `protobuf:"varint,4,opt,name=target,proto3,enum=apiextensions.fn.proto.v1beta1.Target,oneof" json:"target,omitempty"` +} + +func (x *Result) Reset() { + *x = Result{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Result) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Result) ProtoMessage() {} + +func (x *Result) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Result.ProtoReflect.Descriptor instead. +func (*Result) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{12} +} + +func (x *Result) GetSeverity() Severity { + if x != nil { + return x.Severity + } + return Severity_SEVERITY_UNSPECIFIED +} + +func (x *Result) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *Result) GetReason() string { + if x != nil && x.Reason != nil { + return *x.Reason + } + return "" +} + +func (x *Result) GetTarget() Target { + if x != nil && x.Target != nil { + return *x.Target + } + return Target_TARGET_UNSPECIFIED +} + +// Status condition to be applied to the composite resource. Condition may also +// optionally be applied to the composite resource's associated claim. 
For +// detailed information on proper usage of status conditions, please see +// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties. +type Condition struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Type of condition in PascalCase. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Status of the condition. + Status Status `protobuf:"varint,2,opt,name=status,proto3,enum=apiextensions.fn.proto.v1beta1.Status" json:"status,omitempty"` + // Reason contains a programmatic identifier indicating the reason for the + // condition's last transition. Producers of specific condition types may + // define expected values and meanings for this field, and whether the values + // are considered a guaranteed API. The value should be a PascalCase string. + // This field may not be empty. + Reason string `protobuf:"bytes,3,opt,name=reason,proto3" json:"reason,omitempty"` + // Message is a human readable message indicating details about the + // transition. This may be an empty string. + Message *string `protobuf:"bytes,4,opt,name=message,proto3,oneof" json:"message,omitempty"` + // The resources this condition targets. 
+ Target *Target `protobuf:"varint,5,opt,name=target,proto3,enum=apiextensions.fn.proto.v1beta1.Target,oneof" json:"target,omitempty"` +} + +func (x *Condition) Reset() { + *x = Condition{} + if protoimpl.UnsafeEnabled { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Condition) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Condition) ProtoMessage() {} + +func (x *Condition) ProtoReflect() protoreflect.Message { + mi := &file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Condition.ProtoReflect.Descriptor instead. +func (*Condition) Descriptor() ([]byte, []int) { + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP(), []int{13} +} + +func (x *Condition) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Condition) GetStatus() Status { + if x != nil { + return x.Status + } + return Status_STATUS_CONDITION_UNSPECIFIED +} + +func (x *Condition) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +func (x *Condition) GetMessage() string { + if x != nil && x.Message != nil { + return *x.Message + } + return "" +} + +func (x *Condition) GetTarget() Target { + if x != nil && x.Target != nil { + return *x.Target + } + return Target_TARGET_UNSPECIFIED +} + +var File_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto protoreflect.FileDescriptor + +var file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDesc = []byte{ + 0x0a, 0x3e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x66, 0x6e, 0x2f, 
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, + 0x2f, 0x7a, 0x7a, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x75, + 0x6e, 0x5f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x1e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, + 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8e, + 0x06, 0x0a, 0x12, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, + 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x61, + 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x41, 0x0a, 0x08, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x08, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x12, 0x3f, 0x0a, 0x07, 0x64, 0x65, 0x73, + 0x69, 0x72, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x61, 0x70, 0x69, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 
0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x52, 0x07, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x12, 0x32, 0x0a, 0x05, 0x69, 0x6e, + 0x70, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, + 0x63, 0x74, 0x48, 0x00, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x88, 0x01, 0x01, 0x12, 0x36, + 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x01, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x88, 0x01, 0x01, 0x12, 0x6f, 0x0a, 0x0f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x46, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, + 0x2e, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x2e, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x65, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x61, + 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x75, + 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 
0x74, + 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0b, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x1a, 0x6c, + 0x0a, 0x13, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x6b, 0x0a, 0x10, + 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x41, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2b, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, + 0x61, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x69, 0x6e, + 0x70, 0x75, 0x74, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, + 0x72, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x59, + 0x0a, 0x0f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 
0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x72, 0x65, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x42, 0x08, 0x0a, 0x06, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x22, 0x97, 0x01, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4c, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, + 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, + 0x44, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, + 0x64, 0x61, 0x74, 0x61, 0x1a, 0x37, 0x0a, 0x09, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4b, 0x0a, + 0x09, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x3e, 0x0a, 0x05, 0x69, 0x74, + 0x65, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x61, 0x70, 0x69, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x22, 0xbb, 0x03, 0x0a, 0x13, 0x52, + 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x40, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2c, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, + 0x31, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x04, + 0x6d, 0x65, 0x74, 0x61, 0x12, 0x3f, 0x0a, 0x07, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, + 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x07, 0x64, 0x65, + 0x73, 0x69, 0x72, 0x65, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x36, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, + 0x74, 0x48, 0x00, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x88, 0x01, 0x01, 0x12, + 0x50, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, + 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x52, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x73, 
0x12, 0x49, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, + 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x0a, 0x0a, 0x08, + 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x1f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x61, 0x67, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x61, 0x67, 0x22, 0xee, 0x01, 0x0a, 0x0c, 0x52, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x69, 0x0a, 0x0f, 0x65, 0x78, + 0x74, 0x72, 0x61, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, + 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x73, 0x2e, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x1a, 0x73, 0x0a, 0x13, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x46, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, + 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 
0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc3, 0x01, 0x0a, 0x10, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, + 0x1f, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x1f, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x50, 0x0a, 0x0c, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x61, 0x70, + 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x0b, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x42, 0x07, 0x0a, 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x22, 0x99, 0x01, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x12, 0x4f, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x37, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, + 0x31, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x2e, 0x4c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 
0x62, 0x65, 0x6c, + 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x5a, 0x0a, 0x0c, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x10, 0x0a, 0x03, + 0x74, 0x61, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x61, 0x67, 0x12, 0x30, + 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x88, 0x01, 0x01, + 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x74, 0x74, 0x6c, 0x22, 0x8b, 0x02, 0x0a, 0x05, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x46, 0x0a, 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, + 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, + 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x65, 0x12, 0x52, 0x0a, 0x09, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, + 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x1a, 0x66, + 0x0a, 0x0e, 0x52, 
0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x3e, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x28, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, + 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb2, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x12, 0x33, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x08, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x6e, 0x0a, 0x12, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, + 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x3b, 0x0a, 0x05, 0x72, 0x65, 0x61, 0x64, + 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 
0x52, 0x65, 0x61, 0x64, 0x79, 0x52, 0x05, + 0x72, 0x65, 0x61, 0x64, 0x79, 0x1a, 0x44, 0x0a, 0x16, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe0, 0x01, 0x0a, 0x06, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x44, 0x0a, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, + 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, + 0x74, 0x79, 0x52, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, 0x07, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, + 0x88, 0x01, 0x01, 0x12, 0x43, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, + 0x65, 0x74, 0x61, 0x31, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x48, 0x01, 0x52, 0x06, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x72, 0x65, 0x61, + 0x73, 0x6f, 0x6e, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x22, 0xf2, + 0x01, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 
0x04, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x12, 0x3e, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x26, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, + 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, 0x12, 0x43, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x48, + 0x01, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x88, 0x01, 0x01, 0x42, 0x0a, 0x0a, 0x08, + 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x74, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x2a, 0x3f, 0x0a, 0x05, 0x52, 0x65, 0x61, 0x64, 0x79, 0x12, 0x15, 0x0a, 0x11, + 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x54, 0x52, 0x55, + 0x45, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x46, 0x41, 0x4c, + 0x53, 0x45, 0x10, 0x02, 0x2a, 0x63, 0x0a, 0x08, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, + 0x12, 0x18, 0x0a, 0x14, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x53, + 0x50, 0x45, 0x43, 0x49, 0x46, 
0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x45, + 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x46, 0x41, 0x54, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x14, + 0x0a, 0x10, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x57, 0x41, 0x52, 0x4e, 0x49, + 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, + 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x03, 0x2a, 0x56, 0x0a, 0x06, 0x54, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, + 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x53, 0x49, 0x54, 0x45, 0x10, + 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x43, 0x4f, 0x4d, 0x50, + 0x4f, 0x53, 0x49, 0x54, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x43, 0x4c, 0x41, 0x49, 0x4d, 0x10, + 0x02, 0x2a, 0x7f, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x20, 0x0a, 0x1c, 0x53, + 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x4e, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1c, 0x0a, + 0x18, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x4e, 0x44, 0x49, 0x54, 0x49, 0x4f, + 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15, 0x53, + 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x4e, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x54, 0x52, 0x55, 0x45, 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, + 0x5f, 0x43, 0x4f, 0x4e, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x41, 0x4c, 0x53, 0x45, + 0x10, 0x03, 0x32, 0x91, 0x01, 0x0a, 0x15, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x75, 0x6e, 0x6e, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x78, 0x0a, 0x0b, + 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 
0x12, 0x32, 0x2e, 0x61, 0x70, + 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x66, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x52, 0x75, 0x6e, + 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x33, 0x2e, 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x66, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, + 0x2e, 0x52, 0x75, 0x6e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x46, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, + 0x63, 0x72, 0x6f, 0x73, 0x73, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, + 0x61, 0x70, 0x69, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x66, 0x6e, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescOnce sync.Once + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescData = file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDesc +) + +func file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescGZIP() []byte { + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescOnce.Do(func() { + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescData = protoimpl.X.CompressGZIP(file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescData) + }) + return file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDescData +} + +var file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_enumTypes = 
make([]protoimpl.EnumInfo, 4) +var file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes = make([]protoimpl.MessageInfo, 21) +var file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_goTypes = []any{ + (Ready)(0), // 0: apiextensions.fn.proto.v1beta1.Ready + (Severity)(0), // 1: apiextensions.fn.proto.v1beta1.Severity + (Target)(0), // 2: apiextensions.fn.proto.v1beta1.Target + (Status)(0), // 3: apiextensions.fn.proto.v1beta1.Status + (*RunFunctionRequest)(nil), // 4: apiextensions.fn.proto.v1beta1.RunFunctionRequest + (*Credentials)(nil), // 5: apiextensions.fn.proto.v1beta1.Credentials + (*CredentialData)(nil), // 6: apiextensions.fn.proto.v1beta1.CredentialData + (*Resources)(nil), // 7: apiextensions.fn.proto.v1beta1.Resources + (*RunFunctionResponse)(nil), // 8: apiextensions.fn.proto.v1beta1.RunFunctionResponse + (*RequestMeta)(nil), // 9: apiextensions.fn.proto.v1beta1.RequestMeta + (*Requirements)(nil), // 10: apiextensions.fn.proto.v1beta1.Requirements + (*ResourceSelector)(nil), // 11: apiextensions.fn.proto.v1beta1.ResourceSelector + (*MatchLabels)(nil), // 12: apiextensions.fn.proto.v1beta1.MatchLabels + (*ResponseMeta)(nil), // 13: apiextensions.fn.proto.v1beta1.ResponseMeta + (*State)(nil), // 14: apiextensions.fn.proto.v1beta1.State + (*Resource)(nil), // 15: apiextensions.fn.proto.v1beta1.Resource + (*Result)(nil), // 16: apiextensions.fn.proto.v1beta1.Result + (*Condition)(nil), // 17: apiextensions.fn.proto.v1beta1.Condition + nil, // 18: apiextensions.fn.proto.v1beta1.RunFunctionRequest.ExtraResourcesEntry + nil, // 19: apiextensions.fn.proto.v1beta1.RunFunctionRequest.CredentialsEntry + nil, // 20: apiextensions.fn.proto.v1beta1.CredentialData.DataEntry + nil, // 21: apiextensions.fn.proto.v1beta1.Requirements.ExtraResourcesEntry + nil, // 22: apiextensions.fn.proto.v1beta1.MatchLabels.LabelsEntry + nil, // 23: apiextensions.fn.proto.v1beta1.State.ResourcesEntry + nil, // 24: 
apiextensions.fn.proto.v1beta1.Resource.ConnectionDetailsEntry + (*structpb.Struct)(nil), // 25: google.protobuf.Struct + (*durationpb.Duration)(nil), // 26: google.protobuf.Duration +} +var file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_depIdxs = []int32{ + 9, // 0: apiextensions.fn.proto.v1beta1.RunFunctionRequest.meta:type_name -> apiextensions.fn.proto.v1beta1.RequestMeta + 14, // 1: apiextensions.fn.proto.v1beta1.RunFunctionRequest.observed:type_name -> apiextensions.fn.proto.v1beta1.State + 14, // 2: apiextensions.fn.proto.v1beta1.RunFunctionRequest.desired:type_name -> apiextensions.fn.proto.v1beta1.State + 25, // 3: apiextensions.fn.proto.v1beta1.RunFunctionRequest.input:type_name -> google.protobuf.Struct + 25, // 4: apiextensions.fn.proto.v1beta1.RunFunctionRequest.context:type_name -> google.protobuf.Struct + 18, // 5: apiextensions.fn.proto.v1beta1.RunFunctionRequest.extra_resources:type_name -> apiextensions.fn.proto.v1beta1.RunFunctionRequest.ExtraResourcesEntry + 19, // 6: apiextensions.fn.proto.v1beta1.RunFunctionRequest.credentials:type_name -> apiextensions.fn.proto.v1beta1.RunFunctionRequest.CredentialsEntry + 6, // 7: apiextensions.fn.proto.v1beta1.Credentials.credential_data:type_name -> apiextensions.fn.proto.v1beta1.CredentialData + 20, // 8: apiextensions.fn.proto.v1beta1.CredentialData.data:type_name -> apiextensions.fn.proto.v1beta1.CredentialData.DataEntry + 15, // 9: apiextensions.fn.proto.v1beta1.Resources.items:type_name -> apiextensions.fn.proto.v1beta1.Resource + 13, // 10: apiextensions.fn.proto.v1beta1.RunFunctionResponse.meta:type_name -> apiextensions.fn.proto.v1beta1.ResponseMeta + 14, // 11: apiextensions.fn.proto.v1beta1.RunFunctionResponse.desired:type_name -> apiextensions.fn.proto.v1beta1.State + 16, // 12: apiextensions.fn.proto.v1beta1.RunFunctionResponse.results:type_name -> apiextensions.fn.proto.v1beta1.Result + 25, // 13: apiextensions.fn.proto.v1beta1.RunFunctionResponse.context:type_name -> 
google.protobuf.Struct + 10, // 14: apiextensions.fn.proto.v1beta1.RunFunctionResponse.requirements:type_name -> apiextensions.fn.proto.v1beta1.Requirements + 17, // 15: apiextensions.fn.proto.v1beta1.RunFunctionResponse.conditions:type_name -> apiextensions.fn.proto.v1beta1.Condition + 21, // 16: apiextensions.fn.proto.v1beta1.Requirements.extra_resources:type_name -> apiextensions.fn.proto.v1beta1.Requirements.ExtraResourcesEntry + 12, // 17: apiextensions.fn.proto.v1beta1.ResourceSelector.match_labels:type_name -> apiextensions.fn.proto.v1beta1.MatchLabels + 22, // 18: apiextensions.fn.proto.v1beta1.MatchLabels.labels:type_name -> apiextensions.fn.proto.v1beta1.MatchLabels.LabelsEntry + 26, // 19: apiextensions.fn.proto.v1beta1.ResponseMeta.ttl:type_name -> google.protobuf.Duration + 15, // 20: apiextensions.fn.proto.v1beta1.State.composite:type_name -> apiextensions.fn.proto.v1beta1.Resource + 23, // 21: apiextensions.fn.proto.v1beta1.State.resources:type_name -> apiextensions.fn.proto.v1beta1.State.ResourcesEntry + 25, // 22: apiextensions.fn.proto.v1beta1.Resource.resource:type_name -> google.protobuf.Struct + 24, // 23: apiextensions.fn.proto.v1beta1.Resource.connection_details:type_name -> apiextensions.fn.proto.v1beta1.Resource.ConnectionDetailsEntry + 0, // 24: apiextensions.fn.proto.v1beta1.Resource.ready:type_name -> apiextensions.fn.proto.v1beta1.Ready + 1, // 25: apiextensions.fn.proto.v1beta1.Result.severity:type_name -> apiextensions.fn.proto.v1beta1.Severity + 2, // 26: apiextensions.fn.proto.v1beta1.Result.target:type_name -> apiextensions.fn.proto.v1beta1.Target + 3, // 27: apiextensions.fn.proto.v1beta1.Condition.status:type_name -> apiextensions.fn.proto.v1beta1.Status + 2, // 28: apiextensions.fn.proto.v1beta1.Condition.target:type_name -> apiextensions.fn.proto.v1beta1.Target + 7, // 29: apiextensions.fn.proto.v1beta1.RunFunctionRequest.ExtraResourcesEntry.value:type_name -> apiextensions.fn.proto.v1beta1.Resources + 5, // 30: 
apiextensions.fn.proto.v1beta1.RunFunctionRequest.CredentialsEntry.value:type_name -> apiextensions.fn.proto.v1beta1.Credentials + 11, // 31: apiextensions.fn.proto.v1beta1.Requirements.ExtraResourcesEntry.value:type_name -> apiextensions.fn.proto.v1beta1.ResourceSelector + 15, // 32: apiextensions.fn.proto.v1beta1.State.ResourcesEntry.value:type_name -> apiextensions.fn.proto.v1beta1.Resource + 4, // 33: apiextensions.fn.proto.v1beta1.FunctionRunnerService.RunFunction:input_type -> apiextensions.fn.proto.v1beta1.RunFunctionRequest + 8, // 34: apiextensions.fn.proto.v1beta1.FunctionRunnerService.RunFunction:output_type -> apiextensions.fn.proto.v1beta1.RunFunctionResponse + 34, // [34:35] is the sub-list for method output_type + 33, // [33:34] is the sub-list for method input_type + 33, // [33:33] is the sub-list for extension type_name + 33, // [33:33] is the sub-list for extension extendee + 0, // [0:33] is the sub-list for field type_name +} + +func init() { file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_init() } +func file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_init() { + if File_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*RunFunctionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*Credentials); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*CredentialData); i { + case 
0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*Resources); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*RunFunctionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*RequestMeta); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*Requirements); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*ResourceSelector); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*MatchLabels); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*ResponseMeta); i { + case 0: + return 
&v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[10].Exporter = func(v any, i int) any { + switch v := v.(*State); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[11].Exporter = func(v any, i int) any { + switch v := v.(*Resource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[12].Exporter = func(v any, i int) any { + switch v := v.(*Result); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[13].Exporter = func(v any, i int) any { + switch v := v.(*Condition); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[0].OneofWrappers = []any{} + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[1].OneofWrappers = []any{ + (*Credentials_CredentialData)(nil), + } + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[4].OneofWrappers = []any{} + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[7].OneofWrappers = []any{ + (*ResourceSelector_MatchName)(nil), + (*ResourceSelector_MatchLabels)(nil), + } + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[9].OneofWrappers = []any{} + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[12].OneofWrappers = []any{} + 
file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes[13].OneofWrappers = []any{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDesc, + NumEnums: 4, + NumMessages: 21, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_goTypes, + DependencyIndexes: file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_depIdxs, + EnumInfos: file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_enumTypes, + MessageInfos: file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_msgTypes, + }.Build() + File_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto = out.File + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_rawDesc = nil + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_goTypes = nil + file_apiextensions_fn_proto_v1beta1_zz_generated_run_function_proto_depIdxs = nil +} diff --git a/apis/apiextensions/fn/proto/v1beta1/zz_generated_run_function.proto b/apis/apiextensions/fn/proto/v1beta1/zz_generated_run_function.proto new file mode 100644 index 000000000..52ab9266c --- /dev/null +++ b/apis/apiextensions/fn/proto/v1beta1/zz_generated_run_function.proto @@ -0,0 +1,328 @@ +/* +Copyright 2022 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +syntax = "proto3"; + +import "google/protobuf/struct.proto"; +import "google/protobuf/duration.proto"; + +// Generated from apiextensions/fn/proto/v1/run_function.proto by ../hack/duplicate_proto_type.sh. DO NOT EDIT. + +package apiextensions.fn.proto.v1beta1; + +option go_package = "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1beta1"; + +// A FunctionRunnerService is a Composition Function. +service FunctionRunnerService { + // RunFunction runs the Composition Function. + rpc RunFunction(RunFunctionRequest) returns (RunFunctionResponse) {} +} + +// A RunFunctionRequest requests that the Composition Function be run. +message RunFunctionRequest { + // Metadata pertaining to this request. + RequestMeta meta = 1; + + // The observed state prior to invocation of a Function pipeline. State passed + // to each Function is fresh as of the time the pipeline was invoked, not as + // of the time each Function was invoked. + State observed = 2; + + // Desired state according to a Function pipeline. The state passed to a + // particular Function may have been accumulated by previous Functions in the + // pipeline. + // + // Note that the desired state must be a partial object with only the fields + // that this function (and its predecessors in the pipeline) wants to have + // set in the object. Copying a non-partial observed state to desired is most + // likely not what you want to do. Leaving out fields that had been returned + // as desired before will result in them being deleted from the objects in the + // cluster. + State desired = 3; + + // Optional input specific to this Function invocation. A JSON representation + // of the 'input' block of the relevant entry in a Composition's pipeline. + optional google.protobuf.Struct input = 4; + + // Optional context. Crossplane may pass arbitary contextual information to a + // Function. 
A Function may also return context in its RunFunctionResponse, + // and that context will be passed to subsequent Functions. Crossplane + // discards all context returned by the last Function in the pipeline. + optional google.protobuf.Struct context = 5; + + // Optional extra resources that the Function required. + // Note that extra resources is a map to Resources, plural. + // The map key corresponds to the key in a RunFunctionResponse's + // extra_resources field. If a Function requested extra resources that + // did not exist, Crossplane sets the map key to an empty Resources message to + // indicate that it attempted to satisfy the request. + map extra_resources = 6; + + // Optional credentials that this Function may use to communicate with an + // external system. + map credentials = 7; +} + +// Credentials that a Function may use to communicate with an external system. +message Credentials { + // Source of the credentials. + oneof source { + // Credential data loaded by Crossplane, for example from a Secret. + CredentialData credential_data = 1; + } +} + +// CredentialData loaded by Crossplane, for example from a Secret. +message CredentialData { + map data = 1; +} + +// Resources represents the state of several Crossplane resources. +message Resources { + repeated Resource items = 1; +} + +// A RunFunctionResponse contains the result of a Composition Function run. +message RunFunctionResponse { + // Metadata pertaining to this response. + ResponseMeta meta = 1; + + // Desired state according to a Function pipeline. Functions may add desired + // state, and may mutate or delete any part of the desired state they are + // concerned with. A Function must pass through any part of the desired state + // that it is not concerned with. + // + // + // Note that the desired state must be a partial object with only the fields + // that this function (and its predecessors in the pipeline) wants to have + // set in the object. 
Copying a non-partial observed state to desired is most + // likely not what you want to do. Leaving out fields that had been returned + // as desired before will result in them being deleted from the objects in the + // cluster. + State desired = 2; + + // Results of the Function run. Results are used for observability purposes. + repeated Result results = 3; + + // Optional context to be passed to the next Function in the pipeline as part + // of the RunFunctionRequest. Dropped on the last function in the pipeline. + optional google.protobuf.Struct context = 4; + + // Requirements that must be satisfied for this Function to run successfully. + Requirements requirements = 5; + + // Status conditions to be applied to the composite resource. Conditions may also + // optionally be applied to the composite resource's associated claim. + repeated Condition conditions = 6; +} + +// RequestMeta contains metadata pertaining to a RunFunctionRequest. +message RequestMeta { + // An opaque string identifying the content of the request. Two identical + // requests should have the same tag. + string tag = 1; +} + +// Requirements that must be satisfied for a Function to run successfully. +message Requirements { + // Extra resources that this Function requires. + // The map key uniquely identifies the group of resources. + map extra_resources = 1; +} + +// ResourceSelector selects a group of resources, either by name or by label. +message ResourceSelector { + // API version of resources to select. + string api_version = 1; + + // Kind of resources to select. + string kind = 2; + + // Resources to match. + oneof match { + // Match the resource with this name. + string match_name = 3; + + // Match all resources with these labels. + MatchLabels match_labels = 4; + } +} + +// MatchLabels defines a set of labels to match resources against. +message MatchLabels { + map labels = 1; +} + +// ResponseMeta contains metadata pertaining to a RunFunctionResponse. 
+message ResponseMeta { + // An opaque string identifying the content of the request. Must match the + // meta.tag of the corresponding RunFunctionRequest. + string tag = 1; + + // Time-to-live of this response. Deterministic Functions with no side-effects + // (e.g. simple templating Functions) may specify a TTL. Crossplane may choose + // to cache responses until the TTL expires. + optional google.protobuf.Duration ttl = 2; +} + +// State of the composite resource (XR) and any composed resources. +message State { + // The state of the composite resource (XR). + Resource composite = 1; + + // The state of any composed resources. + map resources = 2; +} + +// A Resource represents the state of a composite or composed resource. +message Resource { + // The JSON representation of the resource. + // + // * Crossplane will set this field in a RunFunctionRequest to the entire + // observed state of a resource - including its metadata, spec, and status. + // + // * A Function should set this field in a RunFunctionRequest to communicate + // the desired state of a composite or composed resource. + // + // * A Function may only specify the desired status of a composite resource - + // not its metadata or spec. A Function should not return desired metadata + // or spec for a composite resource. This will be ignored. + // + // * A Function may not specify the desired status of a composed resource - + // only its metadata and spec. A Function should not return desired status + // for a composed resource. This will be ignored. + google.protobuf.Struct resource = 1; + + // The resource's connection details. + // + // * Crossplane will set this field in a RunFunctionRequest to communicate the + // the observed connection details of a composite or composed resource. + // + // * A Function should set this field in a RunFunctionResponse to indicate the + // desired connection details of the composite resource. 
+ // + // * A Function should not set this field in a RunFunctionResponse to indicate + // the desired connection details of a composed resource. This will be + // ignored. + map connection_details = 2; + + // Ready indicates whether the resource should be considered ready. + // + // * Crossplane will never set this field in a RunFunctionRequest. + // + // * A Function should set this field to READY_TRUE in a RunFunctionResponse + // to indicate that a desired composed resource is ready. + // + // * A Function should not set this field in a RunFunctionResponse to indicate + // that the desired composite resource is ready. This will be ignored. + Ready ready = 3; +} + +// Ready indicates whether a composed resource should be considered ready. +enum Ready { + READY_UNSPECIFIED = 0; + + // True means the composed resource has been observed to be ready. + READY_TRUE = 1; + + // False means the composed resource has not been observed to be ready. + READY_FALSE = 2; +} + +// A Result of running a Function. +message Result { + // Severity of this result. + Severity severity = 1; + + // Human-readable details about the result. + string message = 2; + + // Optional PascalCase, machine-readable reason for this result. If omitted, + // the value will be ComposeResources. + optional string reason = 3; + + // The resources this result targets. + optional Target target = 4; +} + +// Severity of Function results. +enum Severity { + SEVERITY_UNSPECIFIED = 0; + + // Fatal results are fatal; subsequent Composition Functions may run, but + // the Composition Function pipeline run will be considered a failure and + // the first fatal result will be returned as an error. + SEVERITY_FATAL = 1; + + // Warning results are non-fatal; the entire Composition will run to + // completion but warning events and debug logs associated with the + // composite resource will be emitted. 
+ SEVERITY_WARNING = 2; + + // Normal results are emitted as normal events and debug logs associated + // with the composite resource. + SEVERITY_NORMAL = 3; +} + +// Target of Function results and conditions. +enum Target { + // If the target is unspecified, the result targets the composite resource. + TARGET_UNSPECIFIED = 0; + + // Target the composite resource. Results that target the composite resource + // should include detailed, advanced information. + TARGET_COMPOSITE = 1; + + // Target the composite and the claim. Results that target the composite and + // the claim should include only end-user friendly information. + TARGET_COMPOSITE_AND_CLAIM = 2; +} + +// Status condition to be applied to the composite resource. Condition may also +// optionally be applied to the composite resource's associated claim. For +// detailed information on proper usage of status conditions, please see +// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties. +message Condition { + // Type of condition in PascalCase. + string type = 1; + + // Status of the condition. + Status status = 2; + + // Reason contains a programmatic identifier indicating the reason for the + // condition's last transition. Producers of specific condition types may + // define expected values and meanings for this field, and whether the values + // are considered a guaranteed API. The value should be a PascalCase string. + // This field may not be empty. + string reason = 3; + + // Message is a human readable message indicating details about the + // transition. This may be an empty string. + optional string message = 4; + + // The resources this condition targets. 
+ optional Target target = 5; +} + +enum Status { + STATUS_CONDITION_UNSPECIFIED = 0; + + STATUS_CONDITION_UNKNOWN = 1; + + STATUS_CONDITION_TRUE = 2; + + STATUS_CONDITION_FALSE = 3; +} diff --git a/apis/apiextensions/fn/proto/v1beta1/zz_generated_run_function_grpc.pb.go b/apis/apiextensions/fn/proto/v1beta1/zz_generated_run_function_grpc.pb.go new file mode 100644 index 000000000..ec3da1b32 --- /dev/null +++ b/apis/apiextensions/fn/proto/v1beta1/zz_generated_run_function_grpc.pb.go @@ -0,0 +1,128 @@ +// +//Copyright 2022 The Crossplane Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc (unknown) +// source: apiextensions/fn/proto/v1beta1/zz_generated_run_function.proto + +// Generated from apiextensions/fn/proto/v1/run_function.proto by ../hack/duplicate_proto_type.sh. DO NOT EDIT. + +package v1beta1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. 
+const _ = grpc.SupportPackageIsVersion7 + +const ( + FunctionRunnerService_RunFunction_FullMethodName = "/apiextensions.fn.proto.v1beta1.FunctionRunnerService/RunFunction" +) + +// FunctionRunnerServiceClient is the client API for FunctionRunnerService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type FunctionRunnerServiceClient interface { + // RunFunction runs the Composition Function. + RunFunction(ctx context.Context, in *RunFunctionRequest, opts ...grpc.CallOption) (*RunFunctionResponse, error) +} + +type functionRunnerServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewFunctionRunnerServiceClient(cc grpc.ClientConnInterface) FunctionRunnerServiceClient { + return &functionRunnerServiceClient{cc} +} + +func (c *functionRunnerServiceClient) RunFunction(ctx context.Context, in *RunFunctionRequest, opts ...grpc.CallOption) (*RunFunctionResponse, error) { + out := new(RunFunctionResponse) + err := c.cc.Invoke(ctx, FunctionRunnerService_RunFunction_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// FunctionRunnerServiceServer is the server API for FunctionRunnerService service. +// All implementations must embed UnimplementedFunctionRunnerServiceServer +// for forward compatibility +type FunctionRunnerServiceServer interface { + // RunFunction runs the Composition Function. + RunFunction(context.Context, *RunFunctionRequest) (*RunFunctionResponse, error) + mustEmbedUnimplementedFunctionRunnerServiceServer() +} + +// UnimplementedFunctionRunnerServiceServer must be embedded to have forward compatible implementations. 
+type UnimplementedFunctionRunnerServiceServer struct { +} + +func (UnimplementedFunctionRunnerServiceServer) RunFunction(context.Context, *RunFunctionRequest) (*RunFunctionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RunFunction not implemented") +} +func (UnimplementedFunctionRunnerServiceServer) mustEmbedUnimplementedFunctionRunnerServiceServer() {} + +// UnsafeFunctionRunnerServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to FunctionRunnerServiceServer will +// result in compilation errors. +type UnsafeFunctionRunnerServiceServer interface { + mustEmbedUnimplementedFunctionRunnerServiceServer() +} + +func RegisterFunctionRunnerServiceServer(s grpc.ServiceRegistrar, srv FunctionRunnerServiceServer) { + s.RegisterService(&FunctionRunnerService_ServiceDesc, srv) +} + +func _FunctionRunnerService_RunFunction_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RunFunctionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FunctionRunnerServiceServer).RunFunction(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: FunctionRunnerService_RunFunction_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FunctionRunnerServiceServer).RunFunction(ctx, req.(*RunFunctionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// FunctionRunnerService_ServiceDesc is the grpc.ServiceDesc for FunctionRunnerService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var FunctionRunnerService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "apiextensions.fn.proto.v1beta1.FunctionRunnerService", + HandlerType: (*FunctionRunnerServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "RunFunction", + Handler: _FunctionRunnerService_RunFunction_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "apiextensions/fn/proto/v1beta1/zz_generated_run_function.proto", +} diff --git a/apis/apiextensions/v1/composition_common.go b/apis/apiextensions/v1/composition_common.go index 4800d6a1d..83b5bed70 100644 --- a/apis/apiextensions/v1/composition_common.go +++ b/apis/apiextensions/v1/composition_common.go @@ -35,7 +35,7 @@ import ( into composition_revision_types.go. */ -// A CompositionMode determines what mode of Composition is used +// A CompositionMode determines what mode of Composition is used. type CompositionMode string const ( @@ -48,9 +48,6 @@ const ( // CompositionModePipeline indicates that a Composition specifies a pipeline // of Composition Functions, each of which is responsible for producing // composed resources that Crossplane should create or update. - // - // THIS IS A BETA FEATURE. It is not honored if the relevant Crossplane - // feature flag is disabled. CompositionModePipeline CompositionMode = "Pipeline" ) @@ -63,7 +60,7 @@ type TypeReference struct { Kind string `json:"kind"` } -// TypeReferenceTo returns a reference to the supplied GroupVersionKind +// TypeReferenceTo returns a reference to the supplied GroupVersionKind. func TypeReferenceTo(gvk schema.GroupVersionKind) TypeReference { return TypeReference{APIVersion: gvk.GroupVersion().String(), Kind: gvk.Kind} } @@ -146,7 +143,7 @@ func (t *ReadinessCheckType) IsValid() bool { } // ReadinessCheck is used to indicate how to tell whether a resource is ready -// for consumption +// for consumption. 
type ReadinessCheck struct { // TODO(negz): Optional fields should be nil in the next version of this // API. How would we know if we actually wanted to match the empty string, @@ -174,7 +171,7 @@ type ReadinessCheck struct { } // MatchConditionReadinessCheck is used to indicate how to tell whether a resource is ready -// for consumption +// for consumption. type MatchConditionReadinessCheck struct { // Type indicates the type of condition you'd like to use. // +kubebuilder:default="Ready" @@ -200,7 +197,7 @@ func (m *MatchConditionReadinessCheck) Validate() *field.Error { } // Validate checks if the readiness check is logically valid. -func (r *ReadinessCheck) Validate() *field.Error { //nolint:gocyclo // This function is not that complex, just a switch +func (r *ReadinessCheck) Validate() *field.Error { if !r.Type.IsValid() { return field.Invalid(field.NewPath("type"), string(r.Type), "unknown readiness check type") } @@ -298,6 +295,12 @@ type PipelineStep struct { // +kubebuilder:pruning:PreserveUnknownFields // +kubebuilder:validation:EmbeddedResource Input *runtime.RawExtension `json:"input,omitempty"` + + // Credentials are optional credentials that the Composition Function needs. + // +optional + // +listType=map + // +listMapKey=name + Credentials []FunctionCredentials `json:"credentials,omitempty"` } // A FunctionReference references a Composition Function that may be used in a @@ -307,6 +310,36 @@ type FunctionReference struct { Name string `json:"name"` } +// FunctionCredentials are optional credentials that a Composition Function +// needs to run. +type FunctionCredentials struct { + // Name of this set of credentials. + Name string `json:"name"` + + // Source of the function credentials. + // +kubebuilder:validation:Enum=None;Secret + Source FunctionCredentialsSource `json:"source"` + + // A SecretRef is a reference to a secret containing credentials that should + // be supplied to the function. 
+ // +optional + SecretRef *xpv1.SecretReference `json:"secretRef,omitempty"` +} + +// A FunctionCredentialsSource is a source from which Composition Function +// credentials may be acquired. +type FunctionCredentialsSource string + +const ( + // FunctionCredentialsSourceNone indicates that a function does not require + // credentials. + FunctionCredentialsSourceNone FunctionCredentialsSource = "None" + + // FunctionCredentialsSourceSecret indicates that a function should acquire + // credentials from a secret. + FunctionCredentialsSourceSecret FunctionCredentialsSource = "Secret" +) + // A StoreConfigReference references a secret store config that may be used to // write connection details. type StoreConfigReference struct { diff --git a/apis/apiextensions/v1/composition_environment.go b/apis/apiextensions/v1/composition_environment.go index a7d6fb210..8635795b3 100644 --- a/apis/apiextensions/v1/composition_environment.go +++ b/apis/apiextensions/v1/composition_environment.go @@ -80,7 +80,6 @@ func (e *EnvironmentConfiguration) Validate() field.ErrorList { // ShouldResolve specifies whether EnvironmentConfiguration should be resolved or not. func (e *EnvironmentConfiguration) ShouldResolve(currentRefs []corev1.ObjectReference) bool { - if e == nil || len(e.EnvironmentConfigs) == 0 { return false } @@ -185,7 +184,6 @@ const ( // An EnvironmentSourceSelector selects an EnvironmentConfig via labels. type EnvironmentSourceSelector struct { - // Mode specifies retrieval strategy: "Single" or "Multiple". // +kubebuilder:validation:Enum=Single;Multiple // +kubebuilder:default=Single @@ -207,7 +205,6 @@ type EnvironmentSourceSelector struct { // Validate logically validates the EnvironmentSourceSelector. 
func (e *EnvironmentSourceSelector) Validate() *field.Error { - if e.Mode == EnvironmentSourceSelectorSingleMode && e.MaxMatch != nil { return field.Forbidden(field.NewPath("maxMatch"), "maxMatch is not supported in Single mode") } diff --git a/apis/apiextensions/v1/composition_environment_test.go b/apis/apiextensions/v1/composition_environment_test.go index 12d4a8aa0..d3e6e4904 100644 --- a/apis/apiextensions/v1/composition_environment_test.go +++ b/apis/apiextensions/v1/composition_environment_test.go @@ -151,7 +151,6 @@ func TestEnvironmentShouldResolve(t *testing.T) { } for name, tc := range cases { - t.Run(name, func(t *testing.T) { got := tc.args.ec.ShouldResolve(tc.args.refs) if diff := cmp.Diff(tc.want, got); diff != "" { @@ -162,7 +161,6 @@ func TestEnvironmentShouldResolve(t *testing.T) { } func TestEnvironmentSourceSelectorValidate(t *testing.T) { - type args struct { e *EnvironmentSourceSelector } @@ -201,7 +199,6 @@ func TestEnvironmentSourceSelectorValidate(t *testing.T) { } for name, tc := range cases { - t.Run(name, func(t *testing.T) { got := tc.args.e.Validate() if diff := cmp.Diff(tc.want, got, cmpopts.IgnoreFields(field.Error{}, "Detail", "BadValue")); diff != "" { diff --git a/apis/apiextensions/v1/composition_revision_types.go b/apis/apiextensions/v1/composition_revision_types.go index 83fbf46de..301262e51 100644 --- a/apis/apiextensions/v1/composition_revision_types.go +++ b/apis/apiextensions/v1/composition_revision_types.go @@ -38,21 +38,24 @@ const ( type CompositionRevisionSpec struct { // CompositeTypeRef specifies the type of composite resource that this // composition is compatible with. - // +immutable + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" CompositeTypeRef TypeReference `json:"compositeTypeRef"` // Mode controls what type or "mode" of Composition will be used. 
// - // "Resources" (the default) indicates that a Composition uses what is - // commonly referred to as "Patch & Transform" or P&T composition. This mode - // of Composition uses an array of resources, each a template for a composed - // resource. + // "Pipeline" indicates that a Composition specifies a pipeline of + // Composition Functions, each of which is responsible for producing + // composed resources that Crossplane should create or update. + // + // "Resources" indicates that a Composition uses what is commonly referred + // to as "Patch & Transform" or P&T composition. This mode of Composition + // uses an array of resources, each a template for a composed resource. + // + // All Compositions should use Pipeline mode. Resources mode is deprecated. + // Resources mode won't be removed in Crossplane 1.x, and will remain the + // default to avoid breaking legacy Compositions. However, it's no longer + // accepting new features, and only accepting security related bug fixes. // - // "Pipeline" indicates that a Composition specifies a pipeline - // of Composition Functions, each of which is responsible for producing - // composed resources that Crossplane should create or update. THE PIPELINE - // MODE IS A BETA FEATURE. It is not honored if the relevant Crossplane - // feature flag is disabled. // +optional // +kubebuilder:validation:Enum=Resources;Pipeline // +kubebuilder:default=Resources @@ -64,6 +67,9 @@ type CompositionRevisionSpec struct { // // PatchSets are only used by the "Resources" mode of Composition. They // are ignored by other modes. + // + // Deprecated: Use Composition Functions instead. + // // +optional PatchSets []PatchSet `json:"patchSets,omitempty"` @@ -80,6 +86,9 @@ type CompositionRevisionSpec struct { // // Resources are only used by the "Resources" mode of Composition. They are // ignored by other modes. + // + // Deprecated: Use Composition Functions instead. 
+ // // +optional Resources []ComposedTemplate `json:"resources,omitempty"` @@ -89,10 +98,9 @@ type CompositionRevisionSpec struct { // // The Pipeline is only used by the "Pipeline" mode of Composition. It is // ignored by other modes. - // - // THIS IS A BETA FIELD. It is not honored if the relevant Crossplane - // feature flag is disabled. // +optional + // +listType=map + // +listMapKey=step Pipeline []PipelineStep `json:"pipeline,omitempty"` // WriteConnectionSecretsToNamespace specifies the namespace in which the @@ -118,7 +126,11 @@ type CompositionRevisionSpec struct { PublishConnectionDetailsWithStoreConfigRef *StoreConfigReference `json:"publishConnectionDetailsWithStoreConfigRef,omitempty"` // Revision number. Newer revisions have larger numbers. - // +immutable + // + // This number can change. When a Composition transitions from state A + // -> B -> A there will be only two CompositionRevisions. Crossplane will + // edit the original CompositionRevision to change its revision number from + // 0 to 2. Revision int64 `json:"revision"` } @@ -133,8 +145,11 @@ type CompositionRevisionStatus struct { // +genclient // +genclient:nonNamespaced -// A CompositionRevision represents a revision in time of a Composition. -// Revisions are created by Crossplane; they should be treated as immutable. +// A CompositionRevision represents a revision of a Composition. Crossplane +// creates new revisions when there are changes to the Composition. +// +// Crossplane creates and manages CompositionRevisions. Don't directly edit +// CompositionRevisions. 
 // +kubebuilder:printcolumn:name="REVISION",type="string",JSONPath=".spec.revision"
 // +kubebuilder:printcolumn:name="XR-KIND",type="string",JSONPath=".spec.compositeTypeRef.kind"
 // +kubebuilder:printcolumn:name="XR-APIVERSION",type="string",JSONPath=".spec.compositeTypeRef.apiVersion"
diff --git a/apis/apiextensions/v1/composition_transforms.go b/apis/apiextensions/v1/composition_transforms.go
index 5a15600ad..a972e58b9 100644
--- a/apis/apiextensions/v1/composition_transforms.go
+++ b/apis/apiextensions/v1/composition_transforms.go
@@ -45,7 +45,6 @@ const (
 // Transform is a unit of process whose input is transformed into an output with
 // the supplied configuration.
 type Transform struct {
-
 	// Type of the transform to be run.
 	// +kubebuilder:validation:Enum=map;match;math;string;convert
 	Type TransformType `json:"type"`
@@ -74,8 +73,6 @@ type Transform struct {
 }
 
 // Validate this Transform is valid.
-//
-//nolint:gocyclo // This is a long but simple/same-y switch.
 func (t *Transform) Validate() *field.Error {
 	switch t.Type {
 	case TransformTypeMath:
@@ -360,7 +357,6 @@ const (
 
 // A StringTransform returns a string given the supplied input.
 type StringTransform struct {
-
 	// Type of the string transform to be run.
 	// +optional
 	// +kubebuilder:validation:Enum=Format;Convert;TrimPrefix;TrimSuffix;Regexp;Join
@@ -378,8 +374,9 @@ type StringTransform struct {
 	// `ToJson` converts any input value into its raw JSON representation.
 	// `ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input
 	// converted to JSON.
+	// `ToAdler32` generates an Adler-32 hash based on the input string.
// +optional - // +kubebuilder:validation:Enum=ToUpper;ToLower;ToBase64;FromBase64;ToJson;ToSha1;ToSha256;ToSha512 + // +kubebuilder:validation:Enum=ToUpper;ToLower;ToBase64;FromBase64;ToJson;ToSha1;ToSha256;ToSha512;ToAdler32 Convert *StringConversionType `json:"convert,omitempty"` // Trim the prefix or suffix from the input @@ -396,8 +393,6 @@ type StringTransform struct { } // Validate checks this StringTransform is valid. -// -//nolint:gocyclo // just a switch func (s *StringTransform) Validate() *field.Error { switch s.Type { case StringTransformTypeFormat, "": @@ -430,7 +425,6 @@ func (s *StringTransform) Validate() *field.Error { return field.Invalid(field.NewPath("type"), s.Type, "unknown string transform type") } return nil - } // A StringTransformRegexp extracts a match from the input using a regular diff --git a/apis/apiextensions/v1/composition_types.go b/apis/apiextensions/v1/composition_types.go index 597f12090..4ffe3c44c 100644 --- a/apis/apiextensions/v1/composition_types.go +++ b/apis/apiextensions/v1/composition_types.go @@ -24,21 +24,24 @@ import ( type CompositionSpec struct { // CompositeTypeRef specifies the type of composite resource that this // composition is compatible with. - // +immutable + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" CompositeTypeRef TypeReference `json:"compositeTypeRef"` // Mode controls what type or "mode" of Composition will be used. // - // "Resources" (the default) indicates that a Composition uses what is - // commonly referred to as "Patch & Transform" or P&T composition. This mode - // of Composition uses an array of resources, each a template for a composed - // resource. + // "Pipeline" indicates that a Composition specifies a pipeline of + // Composition Functions, each of which is responsible for producing + // composed resources that Crossplane should create or update. 
+ // + // "Resources" indicates that a Composition uses what is commonly referred + // to as "Patch & Transform" or P&T composition. This mode of Composition + // uses an array of resources, each a template for a composed resource. + // + // All Compositions should use Pipeline mode. Resources mode is deprecated. + // Resources mode won't be removed in Crossplane 1.x, and will remain the + // default to avoid breaking legacy Compositions. However, it's no longer + // accepting new features, and only accepting security related bug fixes. // - // "Pipeline" indicates that a Composition specifies a pipeline - // of Composition Functions, each of which is responsible for producing - // composed resources that Crossplane should create or update. THE PIPELINE - // MODE IS A BETA FEATURE. It is not honored if the relevant Crossplane - // feature flag is disabled. // +optional // +kubebuilder:validation:Enum=Resources;Pipeline // +kubebuilder:default=Resources @@ -50,6 +53,9 @@ type CompositionSpec struct { // // PatchSets are only used by the "Resources" mode of Composition. They // are ignored by other modes. + // + // Deprecated: Use Composition Functions instead. + // // +optional PatchSets []PatchSet `json:"patchSets,omitempty"` @@ -66,6 +72,9 @@ type CompositionSpec struct { // // Resources are only used by the "Resources" mode of Composition. They are // ignored by other modes. + // + // Deprecated: Use Composition Functions instead. + // // +optional Resources []ComposedTemplate `json:"resources,omitempty"` @@ -75,10 +84,9 @@ type CompositionSpec struct { // // The Pipeline is only used by the "Pipeline" mode of Composition. It is // ignored by other modes. - // - // THIS IS A BETA FIELD. It is not honored if the relevant Crossplane - // feature flag is disabled. 
// +optional + // +listType=map + // +listMapKey=step Pipeline []PipelineStep `json:"pipeline,omitempty"` // WriteConnectionSecretsToNamespace specifies the namespace in which the @@ -109,7 +117,11 @@ type CompositionSpec struct { // +genclient // +genclient:nonNamespaced -// A Composition specifies how a composite resource should be composed. +// A Composition defines a collection of managed resources or functions that +// Crossplane uses to create and manage new composite resources. +// +// Read the Crossplane documentation for +// [more information about Compositions](https://docs.crossplane.io/latest/concepts/compositions). // +kubebuilder:printcolumn:name="XR-KIND",type="string",JSONPath=".spec.compositeTypeRef.kind" // +kubebuilder:printcolumn:name="XR-APIVERSION",type="string",JSONPath=".spec.compositeTypeRef.apiVersion" // +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" diff --git a/apis/apiextensions/v1/composition_validation.go b/apis/apiextensions/v1/composition_validation.go index 75b8586bb..bdbd15961 100644 --- a/apis/apiextensions/v1/composition_validation.go +++ b/apis/apiextensions/v1/composition_validation.go @@ -69,6 +69,23 @@ func (c *Composition) validatePipeline() (errs field.ErrorList) { errs = append(errs, field.Duplicate(field.NewPath("spec", "pipeline").Index(i).Child("step"), f.Step)) } seen[f.Step] = true + + seenCred := map[string]bool{} + for j, cs := range f.Credentials { + if seenCred[cs.Name] { + errs = append(errs, field.Duplicate(field.NewPath("spec", "pipeline").Index(i).Child("credentials").Index(j).Child("name"), cs.Name)) + } + seenCred[cs.Name] = true + + switch cs.Source { + case FunctionCredentialsSourceSecret: + if cs.SecretRef == nil { + errs = append(errs, field.Required(field.NewPath("spec", "pipeline").Index(i).Child("credentials").Index(j).Child("secretRef"), "must be specified when source is Secret")) + } + case FunctionCredentialsSourceNone: + // No requirements here. 
+ } + } } return errs } @@ -76,7 +93,7 @@ func (c *Composition) validatePipeline() (errs field.ErrorList) { // validatePatchSets checks that: // - patchSets are composed of valid patches // - there are no nested patchSets -// - only existing patchSets are used by resources +// - only existing patchSets are used by resources. func (c *Composition) validatePatchSets() (errs field.ErrorList) { definedPatchSets := make(map[string]bool, len(c.Spec.PatchSets)) for i, s := range c.Spec.PatchSets { diff --git a/apis/apiextensions/v1/composition_validation_test.go b/apis/apiextensions/v1/composition_validation_test.go index c808da010..ca873dc2a 100644 --- a/apis/apiextensions/v1/composition_validation_test.go +++ b/apis/apiextensions/v1/composition_validation_test.go @@ -510,6 +510,67 @@ func TestCompositionValidatePipeline(t *testing.T) { }, }, }, + "InvalidDuplicateCredentialNames": { + reason: "A step's credential names must be unique", + args: args{ + comp: &Composition{ + Spec: CompositionSpec{ + Mode: ptr.To(CompositionModePipeline), + Pipeline: []PipelineStep{ + { + Step: "duplicate-creds", + Credentials: []FunctionCredentials{ + { + Name: "foo", + }, + { + Name: "foo", + }, + }, + }, + }, + }, + }, + }, + want: want{ + output: field.ErrorList{ + { + Type: field.ErrorTypeDuplicate, + Field: "spec.pipeline[0].credentials[1].name", + BadValue: "foo", + }, + }, + }, + }, + "InvalidMissingSecretRef": { + reason: "A step's credential must specify a secretRef if its source is a secret", + args: args{ + comp: &Composition{ + Spec: CompositionSpec{ + Mode: ptr.To(CompositionModePipeline), + Pipeline: []PipelineStep{ + { + Step: "duplicate-creds", + Credentials: []FunctionCredentials{ + { + Name: "foo", + Source: FunctionCredentialsSourceSecret, + }, + }, + }, + }, + }, + }, + }, + want: want{ + output: field.ErrorList{ + { + Type: field.ErrorTypeRequired, + Field: "spec.pipeline[0].credentials[0].secretRef", + }, + }, + }, + }, } for name, tc := range cases { t.Run(name, 
func(t *testing.T) { @@ -713,7 +774,9 @@ func TestCompositionValidateEnvironment(t *testing.T) { comp: &Composition{ Spec: CompositionSpec{ Environment: &EnvironmentConfiguration{}, - }}}, + }, + }, + }, }, "ValidNilEnvironment": { reason: "Should accept a nil environment", @@ -721,7 +784,9 @@ func TestCompositionValidateEnvironment(t *testing.T) { comp: &Composition{ Spec: CompositionSpec{ Environment: nil, - }}}, + }, + }, + }, }, "ValidEnvironment": { reason: "Should accept a valid environment", @@ -751,7 +816,15 @@ func TestCompositionValidateEnvironment(t *testing.T) { Type: EnvironmentSourceSelectorLabelMatcherTypeFromCompositeFieldPath, Key: "foo", ValueFromFieldPath: ptr.To("spec.foo"), - }}}}}}}}}, + }, + }, + }, + }, + }, + }, + }, + }, + }, }, "InvalidPatchEnvironment": { reason: "Should reject an environment declaring an invalid patch", @@ -770,7 +843,7 @@ func TestCompositionValidateEnvironment(t *testing.T) { Patches: []EnvironmentPatch{ { Type: PatchTypeFromCompositeFieldPath, - //FromFieldPath: ptr.To("spec.foo"), // missing + // FromFieldPath: ptr.To("spec.foo"), // missing ToFieldPath: ptr.To("metadata.annotations[\"foo\"]"), }, }, @@ -789,7 +862,15 @@ func TestCompositionValidateEnvironment(t *testing.T) { Type: EnvironmentSourceSelectorLabelMatcherTypeFromCompositeFieldPath, Key: "foo", ValueFromFieldPath: ptr.To("spec.foo"), - }}}}}}}}}, + }, + }, + }, + }, + }, + }, + }, + }, + }, }, "InvalidEnvironmentSourceReferenceNoName": { reason: "Should reject a invalid environment, due to a missing name", @@ -809,7 +890,7 @@ func TestCompositionValidateEnvironment(t *testing.T) { { Type: EnvironmentSourceTypeReference, Ref: &EnvironmentSourceReference{ - //Name: "foo", // missing + // Name: "foo", // missing }, }, { @@ -820,7 +901,15 @@ func TestCompositionValidateEnvironment(t *testing.T) { Type: EnvironmentSourceSelectorLabelMatcherTypeFromCompositeFieldPath, Key: "foo", ValueFromFieldPath: ptr.To("spec.foo"), - }}}}}}}}}, + }, + }, + }, + }, + }, + 
}, + }, + }, + }, }, "InvalidEnvironmentSourceSelectorNoKey": { reason: "Should reject a invalid environment due to a missing key in a selector", @@ -849,9 +938,17 @@ func TestCompositionValidateEnvironment(t *testing.T) { MatchLabels: []EnvironmentSourceSelectorLabelMatcher{ { Type: EnvironmentSourceSelectorLabelMatcherTypeFromCompositeFieldPath, - //Key: "foo", // missing + // Key: "foo", // missing ValueFromFieldPath: ptr.To("spec.foo"), - }}}}}}}}}, + }, + }, + }, + }, + }, + }, + }, + }, + }, }, "InvalidMultipleErrors": { reason: "Should reject a invalid environment due to multiple errors, reporting all of them", @@ -862,7 +959,7 @@ func TestCompositionValidateEnvironment(t *testing.T) { Patches: []EnvironmentPatch{ { Type: PatchTypeFromCompositeFieldPath, - //FromFieldPath: ptr.To("spec.foo"), // missing + // FromFieldPath: ptr.To("spec.foo"), // missing ToFieldPath: ptr.To("metadata.annotations[\"foo\"]"), }, }, @@ -870,7 +967,7 @@ func TestCompositionValidateEnvironment(t *testing.T) { { Type: EnvironmentSourceTypeReference, Ref: &EnvironmentSourceReference{ - //Name: "foo", // missing + // Name: "foo", // missing }, }, { @@ -879,9 +976,17 @@ func TestCompositionValidateEnvironment(t *testing.T) { MatchLabels: []EnvironmentSourceSelectorLabelMatcher{ { Type: EnvironmentSourceSelectorLabelMatcherTypeFromCompositeFieldPath, - //Key: "foo", // missing + // Key: "foo", // missing ValueFromFieldPath: ptr.To("spec.foo"), - }}}}}}}}}, + }, + }, + }, + }, + }, + }, + }, + }, + }, want: want{ output: field.ErrorList{ { diff --git a/apis/apiextensions/v1/composition_webhooks.go b/apis/apiextensions/v1/composition_webhooks.go index c4b7310d0..80fda5572 100644 --- a/apis/apiextensions/v1/composition_webhooks.go +++ b/apis/apiextensions/v1/composition_webhooks.go @@ -50,12 +50,12 @@ var ( ) // GetSchemaAwareValidationMode returns the schema-aware validation mode set for the Composition. 
-func (in *Composition) GetSchemaAwareValidationMode() (CompositionValidationMode, error) { - if in.Annotations == nil { +func (c *Composition) GetSchemaAwareValidationMode() (CompositionValidationMode, error) { + if c.Annotations == nil { return DefaultSchemaAwareCompositionValidationMode, nil } - mode, ok := in.Annotations[SchemaAwareCompositionValidationModeAnnotation] + mode, ok := c.Annotations[SchemaAwareCompositionValidationModeAnnotation] if !ok { return DefaultSchemaAwareCompositionValidationMode, nil } diff --git a/apis/apiextensions/v1/register.go b/apis/apiextensions/v1/register.go index 1f8100cca..8caa69904 100644 --- a/apis/apiextensions/v1/register.go +++ b/apis/apiextensions/v1/register.go @@ -30,13 +30,13 @@ const ( ) var ( - // SchemeGroupVersion is group version used to register these objects + // SchemeGroupVersion is group version used to register these objects. SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version} - // SchemeBuilder is used to add go types to the GroupVersionKind scheme + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} - // AddToScheme adds all registered types to scheme + // AddToScheme adds all registered types to scheme. AddToScheme = SchemeBuilder.AddToScheme ) diff --git a/apis/apiextensions/v1/xrd_types.go b/apis/apiextensions/v1/xrd_types.go index e30b9f789..1b7477973 100644 --- a/apis/apiextensions/v1/xrd_types.go +++ b/apis/apiextensions/v1/xrd_types.go @@ -30,12 +30,12 @@ type CompositeResourceDefinitionSpec struct { // Group specifies the API group of the defined composite resource. // Composite resources are served under `/apis//...`. Must match the // name of the XRD (in the form `.`). - // +immutable + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" Group string `json:"group"` // Names specifies the resource and kind names of the defined composite // resource. 
- // +immutable + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" Names extv1.CustomResourceDefinitionNames `json:"names"` // ClaimNames specifies the names of an optional composite resource claim. @@ -46,8 +46,8 @@ type CompositeResourceDefinitionSpec struct { // create, update, or delete a corresponding composite resource. You may add // claim names to an existing CompositeResourceDefinition, but they cannot // be changed or removed once they have been set. - // +immutable // +optional + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" ClaimNames *extv1.CustomResourceDefinitionNames `json:"claimNames,omitempty"` // ConnectionSecretKeys is the list of keys that will be exposed to the end @@ -70,7 +70,7 @@ type CompositeResourceDefinitionSpec struct { // EnforcedCompositionRef refers to the Composition resource that will be used // by all composite instances whose schema is defined by this definition. // +optional - // +immutable + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" EnforcedCompositionRef *CompositionReference `json:"enforcedCompositionRef,omitempty"` // DefaultCompositionUpdatePolicy is the policy used when updating composites after a new @@ -177,7 +177,7 @@ type CompositeResourceValidation struct { // OpenAPIV3Schema is the OpenAPI v3 schema to use for validation and // pruning. // +kubebuilder:pruning:PreserveUnknownFields - OpenAPIV3Schema runtime.RawExtension `json:"openAPIV3Schema,omitempty"` + OpenAPIV3Schema runtime.RawExtension `json:"openAPIV3Schema,omitempty"` //nolint:tagliatelle // False positive. Linter thinks it should be Apiv3, not APIV3. } // CompositeResourceDefinitionStatus shows the observed state of the definition. 
@@ -212,9 +212,11 @@ type CompositeResourceDefinitionControllerStatus struct {
 // +genclient
 // +genclient:nonNamespaced
 
-// A CompositeResourceDefinition defines a new kind of composite infrastructure
-// resource. The new resource is composed of other composite or managed
-// infrastructure resources.
+// A CompositeResourceDefinition defines the schema for a new custom Kubernetes
+// API.
+//
+// Read the Crossplane documentation for
+// [more information about CompositeResourceDefinitions](https://docs.crossplane.io/latest/concepts/composite-resource-definitions).
 // +kubebuilder:printcolumn:name="ESTABLISHED",type="string",JSONPath=".status.conditions[?(@.type=='Established')].status"
 // +kubebuilder:printcolumn:name="OFFERED",type="string",JSONPath=".status.conditions[?(@.type=='Offered')].status"
 // +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
@@ -239,44 +241,44 @@ type CompositeResourceDefinitionList struct {
 
 // GetCompositeGroupVersionKind returns the schema.GroupVersionKind of the CRD for
 // the composite resource this CompositeResourceDefinition defines.
-func (in CompositeResourceDefinition) GetCompositeGroupVersionKind() schema.GroupVersionKind {
+func (c CompositeResourceDefinition) GetCompositeGroupVersionKind() schema.GroupVersionKind {
 	v := ""
-	for _, vr := range in.Spec.Versions {
+	for _, vr := range c.Spec.Versions {
 		if vr.Referenceable {
 			v = vr.Name
 		}
 	}
-	return schema.GroupVersionKind{Group: in.Spec.Group, Version: v, Kind: in.Spec.Names.Kind}
+	return schema.GroupVersionKind{Group: c.Spec.Group, Version: v, Kind: c.Spec.Names.Kind}
 }
 
 // OffersClaim is true when a CompositeResourceDefinition offers a claim for the
 // composite resource it defines.
-func (in CompositeResourceDefinition) OffersClaim() bool { - return in.Spec.ClaimNames != nil +func (c CompositeResourceDefinition) OffersClaim() bool { + return c.Spec.ClaimNames != nil } // GetClaimGroupVersionKind returns the schema.GroupVersionKind of the CRD for // the composite resource claim this CompositeResourceDefinition defines. An // empty GroupVersionKind is returned if the CompositeResourceDefinition does // not offer a claim. -func (in CompositeResourceDefinition) GetClaimGroupVersionKind() schema.GroupVersionKind { - if !in.OffersClaim() { +func (c CompositeResourceDefinition) GetClaimGroupVersionKind() schema.GroupVersionKind { + if !c.OffersClaim() { return schema.GroupVersionKind{} } v := "" - for _, vr := range in.Spec.Versions { + for _, vr := range c.Spec.Versions { if vr.Referenceable { v = vr.Name } } - return schema.GroupVersionKind{Group: in.Spec.Group, Version: v, Kind: in.Spec.ClaimNames.Kind} + return schema.GroupVersionKind{Group: c.Spec.Group, Version: v, Kind: c.Spec.ClaimNames.Kind} } // GetConnectionSecretKeys returns the set of allowed keys to filter the connection // secret. 
-func (in *CompositeResourceDefinition) GetConnectionSecretKeys() []string { - return in.Spec.ConnectionSecretKeys +func (c *CompositeResourceDefinition) GetConnectionSecretKeys() []string { + return c.Spec.ConnectionSecretKeys } diff --git a/apis/apiextensions/v1/zz_generated.conversion.go b/apis/apiextensions/v1/zz_generated.conversion.go old mode 100755 new mode 100644 index d843ef6ae..fbd5113a7 --- a/apis/apiextensions/v1/zz_generated.conversion.go +++ b/apis/apiextensions/v1/zz_generated.conversion.go @@ -337,6 +337,16 @@ func (c *GeneratedRevisionSpecConverter) pV1PolicyToPV1Policy(source *v11.Policy } return pV1Policy } +func (c *GeneratedRevisionSpecConverter) pV1SecretReferenceToPV1SecretReference(source *v11.SecretReference) *v11.SecretReference { + var pV1SecretReference *v11.SecretReference + if source != nil { + var v1SecretReference v11.SecretReference + v1SecretReference.Name = (*source).Name + v1SecretReference.Namespace = (*source).Namespace + pV1SecretReference = &v1SecretReference + } + return pV1SecretReference +} func (c *GeneratedRevisionSpecConverter) pV1StoreConfigReferenceToPV1StoreConfigReference(source *StoreConfigReference) *StoreConfigReference { var pV1StoreConfigReference *StoreConfigReference if source != nil { @@ -540,6 +550,13 @@ func (c *GeneratedRevisionSpecConverter) v1EnvironmentSourceToV1EnvironmentSourc v1EnvironmentSource.Selector = c.pV1EnvironmentSourceSelectorToPV1EnvironmentSourceSelector(source.Selector) return v1EnvironmentSource } +func (c *GeneratedRevisionSpecConverter) v1FunctionCredentialsToV1FunctionCredentials(source FunctionCredentials) FunctionCredentials { + var v1FunctionCredentials FunctionCredentials + v1FunctionCredentials.Name = source.Name + v1FunctionCredentials.Source = FunctionCredentialsSource(source.Source) + v1FunctionCredentials.SecretRef = c.pV1SecretReferenceToPV1SecretReference(source.SecretRef) + return v1FunctionCredentials +} func (c *GeneratedRevisionSpecConverter) 
v1FunctionReferenceToV1FunctionReference(source FunctionReference) FunctionReference { var v1FunctionReference FunctionReference v1FunctionReference.Name = source.Name @@ -626,6 +643,14 @@ func (c *GeneratedRevisionSpecConverter) v1PipelineStepToV1PipelineStep(source P v1PipelineStep.Step = source.Step v1PipelineStep.FunctionRef = c.v1FunctionReferenceToV1FunctionReference(source.FunctionRef) v1PipelineStep.Input = c.pRuntimeRawExtensionToPRuntimeRawExtension(source.Input) + var v1FunctionCredentialsList []FunctionCredentials + if source.Credentials != nil { + v1FunctionCredentialsList = make([]FunctionCredentials, len(source.Credentials)) + for i := 0; i < len(source.Credentials); i++ { + v1FunctionCredentialsList[i] = c.v1FunctionCredentialsToV1FunctionCredentials(source.Credentials[i]) + } + } + v1PipelineStep.Credentials = v1FunctionCredentialsList return v1PipelineStep } func (c *GeneratedRevisionSpecConverter) v1ReadinessCheckToV1ReadinessCheck(source ReadinessCheck) ReadinessCheck { diff --git a/apis/apiextensions/v1/zz_generated.deepcopy.go b/apis/apiextensions/v1/zz_generated.deepcopy.go index ac1d21cfe..dcfac056f 100644 --- a/apis/apiextensions/v1/zz_generated.deepcopy.go +++ b/apis/apiextensions/v1/zz_generated.deepcopy.go @@ -851,6 +851,26 @@ func (in *EnvironmentSourceSelectorLabelMatcher) DeepCopy() *EnvironmentSourceSe return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionCredentials) DeepCopyInto(out *FunctionCredentials) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(commonv1.SecretReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionCredentials. 
+func (in *FunctionCredentials) DeepCopy() *FunctionCredentials { + if in == nil { + return nil + } + out := new(FunctionCredentials) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FunctionReference) DeepCopyInto(out *FunctionReference) { *out = *in @@ -1100,6 +1120,13 @@ func (in *PipelineStep) DeepCopyInto(out *PipelineStep) { *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = make([]FunctionCredentials, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineStep. diff --git a/apis/apiextensions/v1alpha1/environment_config_types.go b/apis/apiextensions/v1alpha1/environment_config_types.go index 217b65006..d76a231f3 100644 --- a/apis/apiextensions/v1alpha1/environment_config_types.go +++ b/apis/apiextensions/v1alpha1/environment_config_types.go @@ -26,7 +26,11 @@ import ( // +genclient // +genclient:nonNamespaced -// A EnvironmentConfig contains a set of arbitrary, unstructured values. +// An EnvironmentConfig contains user-defined unstructured values for +// use in a Composition. +// +// Read the Crossplane documentation for +// [more information about EnvironmentConfigs](https://docs.crossplane.io/latest/concepts/environment-configs). 
// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" // +kubebuilder:resource:scope=Cluster,categories=crossplane,shortName=envcfg type EnvironmentConfig struct { diff --git a/apis/apiextensions/v1alpha1/register.go b/apis/apiextensions/v1alpha1/register.go index b8a13c126..cdb549a3a 100644 --- a/apis/apiextensions/v1alpha1/register.go +++ b/apis/apiextensions/v1alpha1/register.go @@ -30,13 +30,13 @@ const ( ) var ( - // SchemeGroupVersion is group version used to register these objects + // SchemeGroupVersion is group version used to register these objects. SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version} - // SchemeBuilder is used to add go types to the GroupVersionKind scheme + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} - // AddToScheme adds all registered types to scheme + // AddToScheme adds all registered types to scheme. AddToScheme = SchemeBuilder.AddToScheme ) diff --git a/apis/apiextensions/v1alpha1/usage_types.go b/apis/apiextensions/v1alpha1/usage_types.go index f590b4a10..f1d1370e3 100644 --- a/apis/apiextensions/v1alpha1/usage_types.go +++ b/apis/apiextensions/v1alpha1/usage_types.go @@ -68,6 +68,9 @@ type UsageSpec struct { // Reason is the reason for blocking deletion of the resource. // +optional Reason *string `json:"reason,omitempty"` + // ReplayDeletion will trigger a deletion on the used resource during the deletion of the usage itself, if it was attempted to be deleted at least once. + // +optional + ReplayDeletion *bool `json:"replayDeletion,omitempty"` } // UsageStatus defines the observed state of Usage. @@ -76,6 +79,12 @@ type UsageStatus struct { } // A Usage defines a deletion blocking relationship between two resources. +// +// Usages prevent accidental deletion of a single resource or deletion of +// resources with dependent resources. 
+//
+// Read the Crossplane documentation for
+// [more information about Usages](https://docs.crossplane.io/latest/concepts/usages).
 // +kubebuilder:object:root=true
 // +kubebuilder:storageversion
 // +kubebuilder:printcolumn:name="DETAILS",type="string",JSONPath=".metadata.annotations.crossplane\\.io/usage-details"
diff --git a/apis/apiextensions/v1alpha1/zz_generated.deepcopy.go b/apis/apiextensions/v1alpha1/zz_generated.deepcopy.go
index c53d5a361..45148977d 100644
--- a/apis/apiextensions/v1alpha1/zz_generated.deepcopy.go
+++ b/apis/apiextensions/v1alpha1/zz_generated.deepcopy.go
@@ -229,6 +229,11 @@ func (in *UsageSpec) DeepCopyInto(out *UsageSpec) {
 		*out = new(string)
 		**out = **in
 	}
+	if in.ReplayDeletion != nil {
+		in, out := &in.ReplayDeletion, &out.ReplayDeletion
+		*out = new(bool)
+		**out = **in
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UsageSpec.
diff --git a/apis/apiextensions/v1beta1/register.go b/apis/apiextensions/v1beta1/register.go
index 22eaf4bbf..212dfa5b4 100644
--- a/apis/apiextensions/v1beta1/register.go
+++ b/apis/apiextensions/v1beta1/register.go
@@ -30,13 +30,13 @@ const (
 )
 
 var (
-	// SchemeGroupVersion is group version used to register these objects
+	// SchemeGroupVersion is group version used to register these objects.
 	SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version}
 
-	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
+	// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
 	SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
 
-	// AddToScheme adds all registered types to scheme
+	// AddToScheme adds all registered types to scheme.
AddToScheme = SchemeBuilder.AddToScheme ) diff --git a/apis/apiextensions/v1beta1/zz_generated.composition_common.go b/apis/apiextensions/v1beta1/zz_generated.composition_common.go index d2b488430..ba1e7dfdb 100644 --- a/apis/apiextensions/v1beta1/zz_generated.composition_common.go +++ b/apis/apiextensions/v1beta1/zz_generated.composition_common.go @@ -37,7 +37,7 @@ import ( into composition_revision_types.go. */ -// A CompositionMode determines what mode of Composition is used +// A CompositionMode determines what mode of Composition is used. type CompositionMode string const ( @@ -50,9 +50,6 @@ const ( // CompositionModePipeline indicates that a Composition specifies a pipeline // of Composition Functions, each of which is responsible for producing // composed resources that Crossplane should create or update. - // - // THIS IS A BETA FEATURE. It is not honored if the relevant Crossplane - // feature flag is disabled. CompositionModePipeline CompositionMode = "Pipeline" ) @@ -65,7 +62,7 @@ type TypeReference struct { Kind string `json:"kind"` } -// TypeReferenceTo returns a reference to the supplied GroupVersionKind +// TypeReferenceTo returns a reference to the supplied GroupVersionKind. func TypeReferenceTo(gvk schema.GroupVersionKind) TypeReference { return TypeReference{APIVersion: gvk.GroupVersion().String(), Kind: gvk.Kind} } @@ -148,7 +145,7 @@ func (t *ReadinessCheckType) IsValid() bool { } // ReadinessCheck is used to indicate how to tell whether a resource is ready -// for consumption +// for consumption. type ReadinessCheck struct { // TODO(negz): Optional fields should be nil in the next version of this // API. How would we know if we actually wanted to match the empty string, @@ -176,7 +173,7 @@ type ReadinessCheck struct { } // MatchConditionReadinessCheck is used to indicate how to tell whether a resource is ready -// for consumption +// for consumption. 
type MatchConditionReadinessCheck struct { // Type indicates the type of condition you'd like to use. // +kubebuilder:default="Ready" @@ -202,7 +199,7 @@ func (m *MatchConditionReadinessCheck) Validate() *field.Error { } // Validate checks if the readiness check is logically valid. -func (r *ReadinessCheck) Validate() *field.Error { //nolint:gocyclo // This function is not that complex, just a switch +func (r *ReadinessCheck) Validate() *field.Error { if !r.Type.IsValid() { return field.Invalid(field.NewPath("type"), string(r.Type), "unknown readiness check type") } @@ -300,6 +297,12 @@ type PipelineStep struct { // +kubebuilder:pruning:PreserveUnknownFields // +kubebuilder:validation:EmbeddedResource Input *runtime.RawExtension `json:"input,omitempty"` + + // Credentials are optional credentials that the Composition Function needs. + // +optional + // +listType=map + // +listMapKey=name + Credentials []FunctionCredentials `json:"credentials,omitempty"` } // A FunctionReference references a Composition Function that may be used in a @@ -309,6 +312,36 @@ type FunctionReference struct { Name string `json:"name"` } +// FunctionCredentials are optional credentials that a Composition Function +// needs to run. +type FunctionCredentials struct { + // Name of this set of credentials. + Name string `json:"name"` + + // Source of the function credentials. + // +kubebuilder:validation:Enum=None;Secret + Source FunctionCredentialsSource `json:"source"` + + // A SecretRef is a reference to a secret containing credentials that should + // be supplied to the function. + // +optional + SecretRef *xpv1.SecretReference `json:"secretRef,omitempty"` +} + +// A FunctionCredentialsSource is a source from which Composition Function +// credentials may be acquired. +type FunctionCredentialsSource string + +const ( + // FunctionCredentialsSourceNone indicates that a function does not require + // credentials. 
+ FunctionCredentialsSourceNone FunctionCredentialsSource = "None" + + // FunctionCredentialsSourceSecret indicates that a function should acquire + // credentials from a secret. + FunctionCredentialsSourceSecret FunctionCredentialsSource = "Secret" +) + // A StoreConfigReference references a secret store config that may be used to // write connection details. type StoreConfigReference struct { diff --git a/apis/apiextensions/v1beta1/zz_generated.composition_environment.go b/apis/apiextensions/v1beta1/zz_generated.composition_environment.go index a521cbc1c..a7e0d451f 100644 --- a/apis/apiextensions/v1beta1/zz_generated.composition_environment.go +++ b/apis/apiextensions/v1beta1/zz_generated.composition_environment.go @@ -82,7 +82,6 @@ func (e *EnvironmentConfiguration) Validate() field.ErrorList { // ShouldResolve specifies whether EnvironmentConfiguration should be resolved or not. func (e *EnvironmentConfiguration) ShouldResolve(currentRefs []corev1.ObjectReference) bool { - if e == nil || len(e.EnvironmentConfigs) == 0 { return false } @@ -187,7 +186,6 @@ const ( // An EnvironmentSourceSelector selects an EnvironmentConfig via labels. type EnvironmentSourceSelector struct { - // Mode specifies retrieval strategy: "Single" or "Multiple". // +kubebuilder:validation:Enum=Single;Multiple // +kubebuilder:default=Single @@ -209,7 +207,6 @@ type EnvironmentSourceSelector struct { // Validate logically validates the EnvironmentSourceSelector. 
func (e *EnvironmentSourceSelector) Validate() *field.Error { - if e.Mode == EnvironmentSourceSelectorSingleMode && e.MaxMatch != nil { return field.Forbidden(field.NewPath("maxMatch"), "maxMatch is not supported in Single mode") } diff --git a/apis/apiextensions/v1beta1/zz_generated.composition_revision_types.go b/apis/apiextensions/v1beta1/zz_generated.composition_revision_types.go index 3415ce384..f110dccb7 100644 --- a/apis/apiextensions/v1beta1/zz_generated.composition_revision_types.go +++ b/apis/apiextensions/v1beta1/zz_generated.composition_revision_types.go @@ -40,21 +40,24 @@ const ( type CompositionRevisionSpec struct { // CompositeTypeRef specifies the type of composite resource that this // composition is compatible with. - // +immutable + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" CompositeTypeRef TypeReference `json:"compositeTypeRef"` // Mode controls what type or "mode" of Composition will be used. // - // "Resources" (the default) indicates that a Composition uses what is - // commonly referred to as "Patch & Transform" or P&T composition. This mode - // of Composition uses an array of resources, each a template for a composed - // resource. + // "Pipeline" indicates that a Composition specifies a pipeline of + // Composition Functions, each of which is responsible for producing + // composed resources that Crossplane should create or update. + // + // "Resources" indicates that a Composition uses what is commonly referred + // to as "Patch & Transform" or P&T composition. This mode of Composition + // uses an array of resources, each a template for a composed resource. + // + // All Compositions should use Pipeline mode. Resources mode is deprecated. + // Resources mode won't be removed in Crossplane 1.x, and will remain the + // default to avoid breaking legacy Compositions. However, it's no longer + // accepting new features, and only accepting security related bug fixes. 
// - // "Pipeline" indicates that a Composition specifies a pipeline - // of Composition Functions, each of which is responsible for producing - // composed resources that Crossplane should create or update. THE PIPELINE - // MODE IS A BETA FEATURE. It is not honored if the relevant Crossplane - // feature flag is disabled. // +optional // +kubebuilder:validation:Enum=Resources;Pipeline // +kubebuilder:default=Resources @@ -66,6 +69,9 @@ type CompositionRevisionSpec struct { // // PatchSets are only used by the "Resources" mode of Composition. They // are ignored by other modes. + // + // Deprecated: Use Composition Functions instead. + // // +optional PatchSets []PatchSet `json:"patchSets,omitempty"` @@ -82,6 +88,9 @@ type CompositionRevisionSpec struct { // // Resources are only used by the "Resources" mode of Composition. They are // ignored by other modes. + // + // Deprecated: Use Composition Functions instead. + // // +optional Resources []ComposedTemplate `json:"resources,omitempty"` @@ -91,10 +100,9 @@ type CompositionRevisionSpec struct { // // The Pipeline is only used by the "Pipeline" mode of Composition. It is // ignored by other modes. - // - // THIS IS A BETA FIELD. It is not honored if the relevant Crossplane - // feature flag is disabled. // +optional + // +listType=map + // +listMapKey=step Pipeline []PipelineStep `json:"pipeline,omitempty"` // WriteConnectionSecretsToNamespace specifies the namespace in which the @@ -120,7 +128,11 @@ type CompositionRevisionSpec struct { PublishConnectionDetailsWithStoreConfigRef *StoreConfigReference `json:"publishConnectionDetailsWithStoreConfigRef,omitempty"` // Revision number. Newer revisions have larger numbers. - // +immutable + // + // This number can change. When a Composition transitions from state A + // -> B -> A there will be only two CompositionRevisions. Crossplane will + // edit the original CompositionRevision to change its revision number from + // 0 to 2. 
Revision int64 `json:"revision"` } @@ -134,8 +146,11 @@ type CompositionRevisionStatus struct { // +genclient // +genclient:nonNamespaced -// A CompositionRevision represents a revision in time of a Composition. -// Revisions are created by Crossplane; they should be treated as immutable. +// A CompositionRevision represents a revision of a Composition. Crossplane +// creates new revisions when there are changes to the Composition. +// +// Crossplane creates and manages CompositionRevisions. Don't directly edit +// CompositionRevisions. // +kubebuilder:printcolumn:name="REVISION",type="string",JSONPath=".spec.revision" // +kubebuilder:printcolumn:name="XR-KIND",type="string",JSONPath=".spec.compositeTypeRef.kind" // +kubebuilder:printcolumn:name="XR-APIVERSION",type="string",JSONPath=".spec.compositeTypeRef.apiVersion" diff --git a/apis/apiextensions/v1beta1/zz_generated.composition_transforms.go b/apis/apiextensions/v1beta1/zz_generated.composition_transforms.go index c7f611757..c4e4a9406 100644 --- a/apis/apiextensions/v1beta1/zz_generated.composition_transforms.go +++ b/apis/apiextensions/v1beta1/zz_generated.composition_transforms.go @@ -47,7 +47,6 @@ const ( // Transform is a unit of process whose input is transformed into an output with // the supplied configuration. type Transform struct { - // Type of the transform to be run. // +kubebuilder:validation:Enum=map;match;math;string;convert Type TransformType `json:"type"` @@ -76,8 +75,6 @@ type Transform struct { } // Validate this Transform is valid. -// -//nolint:gocyclo // This is a long but simple/same-y switch. func (t *Transform) Validate() *field.Error { switch t.Type { case TransformTypeMath: @@ -362,7 +359,6 @@ const ( // A StringTransform returns a string given the supplied input. type StringTransform struct { - // Type of the string transform to be run. 
 	// +optional
 	// +kubebuilder:validation:Enum=Format;Convert;TrimPrefix;TrimSuffix;Regexp;Join
@@ -380,8 +376,9 @@ type StringTransform struct {
 	// `ToJson` converts any input value into its raw JSON representation.
 	// `ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input
 	// converted to JSON.
+	// `ToAdler32` generates an Adler-32 hash based on the input string.
 	// +optional
-	// +kubebuilder:validation:Enum=ToUpper;ToLower;ToBase64;FromBase64;ToJson;ToSha1;ToSha256;ToSha512
+	// +kubebuilder:validation:Enum=ToUpper;ToLower;ToBase64;FromBase64;ToJson;ToSha1;ToSha256;ToSha512;ToAdler32
 	Convert *StringConversionType `json:"convert,omitempty"`
 
 	// Trim the prefix or suffix from the input
@@ -398,8 +395,6 @@ type StringTransform struct {
 }
 
 // Validate checks this StringTransform is valid.
-//
-//nolint:gocyclo // just a switch
 func (s *StringTransform) Validate() *field.Error {
 	switch s.Type {
 	case StringTransformTypeFormat, "":
@@ -432,7 +427,6 @@ func (s *StringTransform) Validate() *field.Error {
 		return field.Invalid(field.NewPath("type"), s.Type, "unknown string transform type")
 	}
 	return nil
-
 }
 
 // A StringTransformRegexp extracts a match from the input using a regular
diff --git a/apis/apiextensions/v1beta1/zz_generated.deepcopy.go b/apis/apiextensions/v1beta1/zz_generated.deepcopy.go
index e3cd28ed5..522fdc853 100644
--- a/apis/apiextensions/v1beta1/zz_generated.deepcopy.go
+++ b/apis/apiextensions/v1beta1/zz_generated.deepcopy.go
@@ -21,8 +21,8 @@ limitations under the License.
package v1beta1 import ( - commonv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "github.com/crossplane/crossplane-runtime/apis/common/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/runtime" ) @@ -305,7 +305,7 @@ func (in *EnvironmentConfiguration) DeepCopyInto(out *EnvironmentConfiguration) *out = *in if in.DefaultData != nil { in, out := &in.DefaultData, &out.DefaultData - *out = make(map[string]v1.JSON, len(*in)) + *out = make(map[string]apiextensionsv1.JSON, len(*in)) for key, val := range *in { (*out)[key] = *val.DeepCopy() } @@ -326,7 +326,7 @@ func (in *EnvironmentConfiguration) DeepCopyInto(out *EnvironmentConfiguration) } if in.Policy != nil { in, out := &in.Policy, &out.Policy - *out = new(commonv1.Policy) + *out = new(v1.Policy) (*in).DeepCopyInto(*out) } } @@ -485,6 +485,26 @@ func (in *EnvironmentSourceSelectorLabelMatcher) DeepCopy() *EnvironmentSourceSe return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionCredentials) DeepCopyInto(out *FunctionCredentials) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(v1.SecretReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionCredentials. +func (in *FunctionCredentials) DeepCopy() *FunctionCredentials { + if in == nil { + return nil + } + out := new(FunctionCredentials) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *FunctionReference) DeepCopyInto(out *FunctionReference) { *out = *in @@ -505,7 +525,7 @@ func (in *MapTransform) DeepCopyInto(out *MapTransform) { *out = *in if in.Pairs != nil { in, out := &in.Pairs, &out.Pairs - *out = make(map[string]v1.JSON, len(*in)) + *out = make(map[string]apiextensionsv1.JSON, len(*in)) for key, val := range *in { (*out)[key] = *val.DeepCopy() } @@ -673,7 +693,7 @@ func (in *PatchPolicy) DeepCopyInto(out *PatchPolicy) { } if in.MergeOptions != nil { in, out := &in.MergeOptions, &out.MergeOptions - *out = new(commonv1.MergeOptions) + *out = new(v1.MergeOptions) (*in).DeepCopyInto(*out) } } @@ -719,6 +739,13 @@ func (in *PipelineStep) DeepCopyInto(out *PipelineStep) { *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = make([]FunctionCredentials, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineStep. diff --git a/apis/apis.go b/apis/apis.go index a7f723f0b..00eae4d5a 100644 --- a/apis/apis.go +++ b/apis/apis.go @@ -34,10 +34,10 @@ func init() { ) } -// AddToSchemes may be used to add all resources defined in the project to a Scheme +// AddToSchemes may be used to add all resources defined in the project to a Scheme. var AddToSchemes runtime.SchemeBuilder -// AddToScheme adds all Resources to the Scheme +// AddToScheme adds all Resources to the Scheme. func AddToScheme(s *runtime.Scheme) error { return AddToSchemes.AddToScheme(s) } diff --git a/apis/generate.go b/apis/generate.go index 94ebe4468..4d3690710 100644 --- a/apis/generate.go +++ b/apis/generate.go @@ -18,7 +18,7 @@ limitations under the License. */ // NOTE(negz): See the below link for details on what is happening here. 
-// https://github.com/golang/go/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module +// https://go.dev/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module // Remove existing manifests //go:generate rm -rf ../cluster/crds @@ -32,8 +32,14 @@ limitations under the License. //go:generate ../hack/duplicate_api_type.sh apiextensions/v1/composition_transforms.go apiextensions/v1beta1 //go:generate ../hack/duplicate_api_type.sh apiextensions/v1/composition_environment.go apiextensions/v1beta1 +//go:generate ../hack/duplicate_api_type.sh pkg/v1/package_types.go pkg/v1beta1 +//go:generate ../hack/duplicate_api_type.sh pkg/v1/package_runtime_types.go pkg/v1beta1 +//go:generate ../hack/duplicate_api_type.sh pkg/v1/revision_types.go pkg/v1beta1 +//go:generate ../hack/duplicate_api_type.sh pkg/v1/function_types.go pkg/v1beta1 + //go:generate ../hack/duplicate_api_type.sh pkg/meta/v1/configuration_types.go pkg/meta/v1alpha1 //go:generate ../hack/duplicate_api_type.sh pkg/meta/v1/provider_types.go pkg/meta/v1alpha1 +//go:generate ../hack/duplicate_api_type.sh pkg/meta/v1/function_types.go pkg/meta/v1beta1 //go:generate ../hack/duplicate_api_type.sh pkg/meta/v1/meta.go pkg/meta/v1alpha1 //go:generate ../hack/duplicate_api_type.sh pkg/meta/v1/meta.go pkg/meta/v1beta1 @@ -57,6 +63,11 @@ limitations under the License. // Generate conversion code //go:generate go run -tags generate github.com/jmattheis/goverter/cmd/goverter gen -build-tags="" ./apiextensions/v1 //go:generate go run -tags generate github.com/jmattheis/goverter/cmd/goverter gen -build-tags="" ./pkg/meta/v1alpha1 +//go:generate go run -tags generate github.com/jmattheis/goverter/cmd/goverter gen -build-tags="" ./pkg/meta/v1beta1 + +// Replicate identical gRPC APIs + +//go:generate ../hack/duplicate_proto_type.sh apiextensions/fn/proto/v1/run_function.proto apiextensions/fn/proto/v1beta1 // Generate gRPC types and stubs. // @@ -70,12 +81,11 @@ limitations under the License. // (or protoc) to invoke them. 
//go:generate go install google.golang.org/protobuf/cmd/protoc-gen-go google.golang.org/grpc/cmd/protoc-gen-go-grpc -//go:generate go run github.com/bufbuild/buf/cmd/buf generate +//go:generate go run github.com/bufbuild/buf/cmd/buf@v1.31.0 generate package apis import ( - _ "github.com/bufbuild/buf/cmd/buf" //nolint:typecheck _ "github.com/jmattheis/goverter/cmd/goverter" //nolint:typecheck _ "google.golang.org/grpc/cmd/protoc-gen-go-grpc" //nolint:typecheck _ "google.golang.org/protobuf/cmd/protoc-gen-go" //nolint:typecheck diff --git a/apis/pkg/meta/v1/conversion.go b/apis/pkg/meta/v1/conversion.go index 98ff6c683..7880d49a4 100644 --- a/apis/pkg/meta/v1/conversion.go +++ b/apis/pkg/meta/v1/conversion.go @@ -21,3 +21,6 @@ func (p *Provider) Hub() {} // Hub marks this type as the conversion hub. func (c *Configuration) Hub() {} + +// Hub marks this type as the conversion hub. +func (f *Function) Hub() {} diff --git a/apis/pkg/meta/v1beta1/function_types.go b/apis/pkg/meta/v1/function_types.go similarity index 98% rename from apis/pkg/meta/v1beta1/function_types.go rename to apis/pkg/meta/v1/function_types.go index cd603b534..5a22b4732 100644 --- a/apis/pkg/meta/v1beta1/function_types.go +++ b/apis/pkg/meta/v1/function_types.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta1 +package v1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/apis/pkg/meta/v1/interfaces.go b/apis/pkg/meta/v1/interfaces.go index 6b3c3b16e..7310b91be 100644 --- a/apis/pkg/meta/v1/interfaces.go +++ b/apis/pkg/meta/v1/interfaces.go @@ -16,8 +16,11 @@ limitations under the License. package v1 -var _ Pkg = &Configuration{} -var _ Pkg = &Provider{} +var ( + _ Pkg = &Configuration{} + _ Pkg = &Provider{} + _ Pkg = &Function{} +) // Pkg is a description of a Crossplane package. 
// +k8s:deepcopy-gen=false @@ -39,11 +42,21 @@ func (c *Configuration) GetDependencies() []Dependency { // GetCrossplaneConstraints gets the Provider package's Crossplane version // constraints. -func (c *Provider) GetCrossplaneConstraints() *CrossplaneConstraints { - return c.Spec.MetaSpec.Crossplane +func (p *Provider) GetCrossplaneConstraints() *CrossplaneConstraints { + return p.Spec.MetaSpec.Crossplane } // GetDependencies gets the Provider package's dependencies. -func (c *Provider) GetDependencies() []Dependency { - return c.Spec.MetaSpec.DependsOn +func (p *Provider) GetDependencies() []Dependency { + return p.Spec.MetaSpec.DependsOn +} + +// GetCrossplaneConstraints gets the Function package's Crossplane version constraints. +func (f *Function) GetCrossplaneConstraints() *CrossplaneConstraints { + return f.Spec.MetaSpec.Crossplane +} + +// GetDependencies gets the Function package's dependencies. +func (f *Function) GetDependencies() []Dependency { + return f.Spec.DependsOn } diff --git a/apis/pkg/meta/v1/register.go b/apis/pkg/meta/v1/register.go index 0ede3e8aa..0136ebcaa 100644 --- a/apis/pkg/meta/v1/register.go +++ b/apis/pkg/meta/v1/register.go @@ -30,13 +30,13 @@ const ( ) var ( - // SchemeGroupVersion is group version used to register these objects + // SchemeGroupVersion is group version used to register these objects. SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version} - // SchemeBuilder is used to add go types to the GroupVersionKind scheme + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} - // AddToScheme adds all registered types to scheme + // AddToScheme adds all registered types to scheme. AddToScheme = SchemeBuilder.AddToScheme ) @@ -56,7 +56,16 @@ var ( ConfigurationGroupVersionKind = SchemeGroupVersion.WithKind(ConfigurationKind) ) +// Function type metadata. 
+var ( + FunctionKind = reflect.TypeOf(Function{}).Name() + FunctionGroupKind = schema.GroupKind{Group: Group, Kind: FunctionKind}.String() + FunctionKindAPIVersion = FunctionKind + "." + SchemeGroupVersion.String() + FunctionGroupVersionKind = SchemeGroupVersion.WithKind(FunctionKind) +) + func init() { SchemeBuilder.Register(&Configuration{}) SchemeBuilder.Register(&Provider{}) + SchemeBuilder.Register(&Function{}) } diff --git a/apis/pkg/meta/v1/zz_generated.deepcopy.go b/apis/pkg/meta/v1/zz_generated.deepcopy.go index 96d2c4505..bc1d84595 100644 --- a/apis/pkg/meta/v1/zz_generated.deepcopy.go +++ b/apis/pkg/meta/v1/zz_generated.deepcopy.go @@ -139,6 +139,53 @@ func (in *Dependency) DeepCopy() *Dependency { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Function) DeepCopyInto(out *Function) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Function. +func (in *Function) DeepCopy() *Function { + if in == nil { + return nil + } + out := new(Function) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Function) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionSpec) DeepCopyInto(out *FunctionSpec) { + *out = *in + in.MetaSpec.DeepCopyInto(&out.MetaSpec) + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionSpec. 
+func (in *FunctionSpec) DeepCopy() *FunctionSpec { + if in == nil { + return nil + } + out := new(FunctionSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MetaSpec) DeepCopyInto(out *MetaSpec) { *out = *in diff --git a/apis/pkg/meta/v1alpha1/register.go b/apis/pkg/meta/v1alpha1/register.go index 41f37e2c0..5829a58d1 100644 --- a/apis/pkg/meta/v1alpha1/register.go +++ b/apis/pkg/meta/v1alpha1/register.go @@ -30,13 +30,13 @@ const ( ) var ( - // SchemeGroupVersion is group version used to register these objects + // SchemeGroupVersion is group version used to register these objects. SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version} - // SchemeBuilder is used to add go types to the GroupVersionKind scheme + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} - // AddToScheme adds all registered types to scheme + // AddToScheme adds all registered types to scheme. AddToScheme = SchemeBuilder.AddToScheme ) diff --git a/apis/pkg/meta/v1beta1/conversion.go b/apis/pkg/meta/v1beta1/conversion.go index 24fe9b14a..9dcea5362 100644 --- a/apis/pkg/meta/v1beta1/conversion.go +++ b/apis/pkg/meta/v1beta1/conversion.go @@ -16,5 +16,74 @@ limitations under the License. package v1beta1 -// Hub marks this type as the conversion hub. -func (p *Function) Hub() {} +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/conversion" + + "github.com/crossplane/crossplane-runtime/pkg/errors" + + v1 "github.com/crossplane/crossplane/apis/pkg/meta/v1" +) + +const ( + errWrongConvertToFunction = "must convert to *v1.Function" + errWrongConvertFromFunction = "must convert from *v1.Function" +) + +// A ToHubConverter converts v1beta1 types to the 'hub' v1 type. 
+// +// goverter:converter +// goverter:name GeneratedToHubConverter +// goverter:extend ConvertObjectMeta +// goverter:output:file ./zz_generated.conversion.go +// goverter:output:package github.com/crossplane/crossplane/apis/pkg/meta/v1beta1 +// +k8s:deepcopy-gen=false +type ToHubConverter interface { + Function(in *Function) *v1.Function +} + +// A FromHubConverter converts v1beta1 types from the 'hub' v1 type. +// +// goverter:converter +// goverter:name GeneratedFromHubConverter +// goverter:extend ConvertObjectMeta +// goverter:output:file ./zz_generated.conversion.go +// goverter:output:package github.com/crossplane/crossplane/apis/pkg/meta/v1beta1 +// +k8s:deepcopy-gen=false +type FromHubConverter interface { + Function(in *v1.Function) *Function +} + +// ConvertObjectMeta 'converts' ObjectMeta by producing a deepcopy. This +// is necessary because goverter can't convert metav1.Time. It also prevents +// goverter generating code that is functionally identical to deepcopygen's. +func ConvertObjectMeta(in metav1.ObjectMeta) metav1.ObjectMeta { + out := in.DeepCopy() + return *out +} + +// ConvertTo converts this Function to the Hub version. +func (c *Function) ConvertTo(hub conversion.Hub) error { + out, ok := hub.(*v1.Function) + if !ok { + return errors.New(errWrongConvertToFunction) + } + + conv := &GeneratedToHubConverter{} + *out = *conv.Function(c) + + return nil +} + +// ConvertFrom converts this Function from the Hub version. +func (c *Function) ConvertFrom(hub conversion.Hub) error { + in, ok := hub.(*v1.Function) + if !ok { + return errors.New(errWrongConvertFromFunction) + } + + conv := &GeneratedFromHubConverter{} + *c = *conv.Function(in) + + return nil +} diff --git a/apis/pkg/meta/v1beta1/function_interfaces.go b/apis/pkg/meta/v1beta1/function_interfaces.go deleted file mode 100644 index aec145335..000000000 --- a/apis/pkg/meta/v1beta1/function_interfaces.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2023 The Crossplane Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - v1 "github.com/crossplane/crossplane/apis/pkg/meta/v1" -) - -// GetCrossplaneConstraints gets the Function package's Crossplane version constraints. -func (f *Function) GetCrossplaneConstraints() *v1.CrossplaneConstraints { - if f.Spec.MetaSpec.Crossplane == nil { - return nil - } - - cc := v1.CrossplaneConstraints{Version: f.Spec.MetaSpec.Crossplane.Version} - return &cc -} - -// GetDependencies gets the Function package's dependencies. -func (f *Function) GetDependencies() []v1.Dependency { - if f.Spec.MetaSpec.DependsOn == nil { - return []v1.Dependency{} - } - - d := make([]v1.Dependency, len(f.Spec.MetaSpec.DependsOn)) - for i, dep := range f.Spec.MetaSpec.DependsOn { - d[i] = v1.Dependency{ - Provider: dep.Provider, - Configuration: dep.Configuration, - Function: dep.Function, - Version: dep.Version, - } - } - - return d -} diff --git a/apis/pkg/meta/v1beta1/register.go b/apis/pkg/meta/v1beta1/register.go index d5675481d..909d27feb 100644 --- a/apis/pkg/meta/v1beta1/register.go +++ b/apis/pkg/meta/v1beta1/register.go @@ -30,13 +30,13 @@ const ( ) var ( - // SchemeGroupVersion is group version used to register these objects + // SchemeGroupVersion is group version used to register these objects. 
SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version} - // SchemeBuilder is used to add go types to the GroupVersionKind scheme + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} - // AddToScheme adds all registered types to scheme + // AddToScheme adds all registered types to scheme. AddToScheme = SchemeBuilder.AddToScheme ) diff --git a/apis/pkg/meta/v1beta1/zz_generated.conversion.go b/apis/pkg/meta/v1beta1/zz_generated.conversion.go new file mode 100644 index 000000000..74712ca99 --- /dev/null +++ b/apis/pkg/meta/v1beta1/zz_generated.conversion.go @@ -0,0 +1,161 @@ +// Code generated by github.com/jmattheis/goverter, DO NOT EDIT. +//go:build !goverter + +package v1beta1 + +import ( + v1 "github.com/crossplane/crossplane/apis/pkg/meta/v1" + v11 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type GeneratedFromHubConverter struct{} + +func (c *GeneratedFromHubConverter) Function(source *v1.Function) *Function { + var pV1beta1Function *Function + if source != nil { + var v1beta1Function Function + v1beta1Function.TypeMeta = c.v1TypeMetaToV1TypeMeta((*source).TypeMeta) + v1beta1Function.ObjectMeta = ConvertObjectMeta((*source).ObjectMeta) + v1beta1Function.Spec = c.v1FunctionSpecToV1beta1FunctionSpec((*source).Spec) + pV1beta1Function = &v1beta1Function + } + return pV1beta1Function +} +func (c *GeneratedFromHubConverter) pV1CrossplaneConstraintsToPV1beta1CrossplaneConstraints(source *v1.CrossplaneConstraints) *CrossplaneConstraints { + var pV1beta1CrossplaneConstraints *CrossplaneConstraints + if source != nil { + var v1beta1CrossplaneConstraints CrossplaneConstraints + v1beta1CrossplaneConstraints.Version = (*source).Version + pV1beta1CrossplaneConstraints = &v1beta1CrossplaneConstraints + } + return pV1beta1CrossplaneConstraints +} +func (c *GeneratedFromHubConverter) v1DependencyToV1beta1Dependency(source v1.Dependency) Dependency { + var v1beta1Dependency 
Dependency + var pString *string + if source.Provider != nil { + xstring := *source.Provider + pString = &xstring + } + v1beta1Dependency.Provider = pString + var pString2 *string + if source.Configuration != nil { + xstring2 := *source.Configuration + pString2 = &xstring2 + } + v1beta1Dependency.Configuration = pString2 + var pString3 *string + if source.Function != nil { + xstring3 := *source.Function + pString3 = &xstring3 + } + v1beta1Dependency.Function = pString3 + v1beta1Dependency.Version = source.Version + return v1beta1Dependency +} +func (c *GeneratedFromHubConverter) v1FunctionSpecToV1beta1FunctionSpec(source v1.FunctionSpec) FunctionSpec { + var v1beta1FunctionSpec FunctionSpec + v1beta1FunctionSpec.MetaSpec = c.v1MetaSpecToV1beta1MetaSpec(source.MetaSpec) + var pString *string + if source.Image != nil { + xstring := *source.Image + pString = &xstring + } + v1beta1FunctionSpec.Image = pString + return v1beta1FunctionSpec +} +func (c *GeneratedFromHubConverter) v1MetaSpecToV1beta1MetaSpec(source v1.MetaSpec) MetaSpec { + var v1beta1MetaSpec MetaSpec + v1beta1MetaSpec.Crossplane = c.pV1CrossplaneConstraintsToPV1beta1CrossplaneConstraints(source.Crossplane) + var v1beta1DependencyList []Dependency + if source.DependsOn != nil { + v1beta1DependencyList = make([]Dependency, len(source.DependsOn)) + for i := 0; i < len(source.DependsOn); i++ { + v1beta1DependencyList[i] = c.v1DependencyToV1beta1Dependency(source.DependsOn[i]) + } + } + v1beta1MetaSpec.DependsOn = v1beta1DependencyList + return v1beta1MetaSpec +} +func (c *GeneratedFromHubConverter) v1TypeMetaToV1TypeMeta(source v11.TypeMeta) v11.TypeMeta { + var v1TypeMeta v11.TypeMeta + v1TypeMeta.Kind = source.Kind + v1TypeMeta.APIVersion = source.APIVersion + return v1TypeMeta +} + +type GeneratedToHubConverter struct{} + +func (c *GeneratedToHubConverter) Function(source *Function) *v1.Function { + var pV1Function *v1.Function + if source != nil { + var v1Function v1.Function + v1Function.TypeMeta = 
c.v1TypeMetaToV1TypeMeta((*source).TypeMeta) + v1Function.ObjectMeta = ConvertObjectMeta((*source).ObjectMeta) + v1Function.Spec = c.v1beta1FunctionSpecToV1FunctionSpec((*source).Spec) + pV1Function = &v1Function + } + return pV1Function +} +func (c *GeneratedToHubConverter) pV1beta1CrossplaneConstraintsToPV1CrossplaneConstraints(source *CrossplaneConstraints) *v1.CrossplaneConstraints { + var pV1CrossplaneConstraints *v1.CrossplaneConstraints + if source != nil { + var v1CrossplaneConstraints v1.CrossplaneConstraints + v1CrossplaneConstraints.Version = (*source).Version + pV1CrossplaneConstraints = &v1CrossplaneConstraints + } + return pV1CrossplaneConstraints +} +func (c *GeneratedToHubConverter) v1TypeMetaToV1TypeMeta(source v11.TypeMeta) v11.TypeMeta { + var v1TypeMeta v11.TypeMeta + v1TypeMeta.Kind = source.Kind + v1TypeMeta.APIVersion = source.APIVersion + return v1TypeMeta +} +func (c *GeneratedToHubConverter) v1beta1DependencyToV1Dependency(source Dependency) v1.Dependency { + var v1Dependency v1.Dependency + var pString *string + if source.Provider != nil { + xstring := *source.Provider + pString = &xstring + } + v1Dependency.Provider = pString + var pString2 *string + if source.Configuration != nil { + xstring2 := *source.Configuration + pString2 = &xstring2 + } + v1Dependency.Configuration = pString2 + var pString3 *string + if source.Function != nil { + xstring3 := *source.Function + pString3 = &xstring3 + } + v1Dependency.Function = pString3 + v1Dependency.Version = source.Version + return v1Dependency +} +func (c *GeneratedToHubConverter) v1beta1FunctionSpecToV1FunctionSpec(source FunctionSpec) v1.FunctionSpec { + var v1FunctionSpec v1.FunctionSpec + v1FunctionSpec.MetaSpec = c.v1beta1MetaSpecToV1MetaSpec(source.MetaSpec) + var pString *string + if source.Image != nil { + xstring := *source.Image + pString = &xstring + } + v1FunctionSpec.Image = pString + return v1FunctionSpec +} +func (c *GeneratedToHubConverter) v1beta1MetaSpecToV1MetaSpec(source 
MetaSpec) v1.MetaSpec { + var v1MetaSpec v1.MetaSpec + v1MetaSpec.Crossplane = c.pV1beta1CrossplaneConstraintsToPV1CrossplaneConstraints(source.Crossplane) + var v1DependencyList []v1.Dependency + if source.DependsOn != nil { + v1DependencyList = make([]v1.Dependency, len(source.DependsOn)) + for i := 0; i < len(source.DependsOn); i++ { + v1DependencyList[i] = c.v1beta1DependencyToV1Dependency(source.DependsOn[i]) + } + } + v1MetaSpec.DependsOn = v1DependencyList + return v1MetaSpec +} diff --git a/apis/pkg/meta/v1beta1/zz_generated.deepcopy.go b/apis/pkg/meta/v1beta1/zz_generated.deepcopy.go index 3411301bd..7f2ad383c 100644 --- a/apis/pkg/meta/v1beta1/zz_generated.deepcopy.go +++ b/apis/pkg/meta/v1beta1/zz_generated.deepcopy.go @@ -116,6 +116,36 @@ func (in *FunctionSpec) DeepCopy() *FunctionSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GeneratedFromHubConverter) DeepCopyInto(out *GeneratedFromHubConverter) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeneratedFromHubConverter. +func (in *GeneratedFromHubConverter) DeepCopy() *GeneratedFromHubConverter { + if in == nil { + return nil + } + out := new(GeneratedFromHubConverter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GeneratedToHubConverter) DeepCopyInto(out *GeneratedToHubConverter) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GeneratedToHubConverter. +func (in *GeneratedToHubConverter) DeepCopy() *GeneratedToHubConverter { + if in == nil { + return nil + } + out := new(GeneratedToHubConverter) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *MetaSpec) DeepCopyInto(out *MetaSpec) { *out = *in diff --git a/apis/pkg/meta/v1beta1/function_interfaces_test.go b/apis/pkg/meta/v1beta1/zz_generated.function_types.go similarity index 50% rename from apis/pkg/meta/v1beta1/function_interfaces_test.go rename to apis/pkg/meta/v1beta1/zz_generated.function_types.go index 9ddba9c7d..0698e1bcf 100644 --- a/apis/pkg/meta/v1beta1/function_interfaces_test.go +++ b/apis/pkg/meta/v1beta1/zz_generated.function_types.go @@ -14,10 +14,28 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Generated from pkg/meta/v1/function_types.go by ../hack/duplicate_api_type.sh. DO NOT EDIT. + package v1beta1 import ( - v1 "github.com/crossplane/crossplane/apis/pkg/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -var _ v1.Pkg = &Function{} +// FunctionSpec specifies the configuration of a Function. +type FunctionSpec struct { + MetaSpec `json:",inline"` + + // Image is the packaged Function image. + Image *string `json:"image,omitempty"` +} + +// +kubebuilder:object:root=true + +// A Function is the description of a Crossplane Function package. +type Function struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec FunctionSpec `json:"spec"` +} diff --git a/apis/pkg/pkg.go b/apis/pkg/pkg.go index 81cac3e51..14b868bd2 100644 --- a/apis/pkg/pkg.go +++ b/apis/pkg/pkg.go @@ -34,10 +34,10 @@ func init() { ) } -// AddToSchemes may be used to add all resources defined in the project to a Scheme +// AddToSchemes may be used to add all resources defined in the project to a Scheme. var AddToSchemes runtime.SchemeBuilder -// AddToScheme adds all Resources to the Scheme +// AddToScheme adds all Resources to the Scheme. 
func AddToScheme(s *runtime.Scheme) error { return AddToSchemes.AddToScheme(s) } diff --git a/apis/pkg/v1/configuration_types.go b/apis/pkg/v1/configuration_types.go index 5d30cdd30..720680fc7 100644 --- a/apis/pkg/v1/configuration_types.go +++ b/apis/pkg/v1/configuration_types.go @@ -26,7 +26,12 @@ import ( // +genclient // +genclient:nonNamespaced -// Configuration is the CRD type for a request to add a configuration to Crossplane. +// A Configuration installs an OCI compatible Crossplane package, extending +// Crossplane with support for new kinds of CompositeResourceDefinitions and +// Compositions. +// +// Read the Crossplane documentation for +// [more information about Configuration packages](https://docs.crossplane.io/latest/concepts/packages). // +kubebuilder:subresource:status // +kubebuilder:storageversion // +kubebuilder:printcolumn:name="INSTALLED",type="string",JSONPath=".status.conditions[?(@.type=='Installed')].status" @@ -67,7 +72,11 @@ type ConfigurationList struct { // +genclient // +genclient:nonNamespaced -// A ConfigurationRevision that has been added to Crossplane. +// A ConfigurationRevision represents a revision of a Configuration. Crossplane +// creates new revisions when there are changes to a Configuration. +// +// Crossplane creates and manages ConfigurationRevision. Don't directly edit +// ConfigurationRevisions. // +kubebuilder:subresource:status // +kubebuilder:storageversion // +kubebuilder:printcolumn:name="HEALTHY",type="string",JSONPath=".status.conditions[?(@.type=='Healthy')].status" diff --git a/apis/pkg/v1beta1/function_types.go b/apis/pkg/v1/function_types.go similarity index 80% rename from apis/pkg/v1beta1/function_types.go rename to apis/pkg/v1/function_types.go index 89b5eb6f7..aa2072e37 100644 --- a/apis/pkg/v1beta1/function_types.go +++ b/apis/pkg/v1/function_types.go @@ -14,27 +14,23 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1beta1 +package v1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - - // TODO(negz): Ideally our v1beta1 package wouldn't import types from v1, as - // this strongly couples the types. This would make life difficult if we - // wanted to evolve this package in a different direction from the current - // v1 implementation. Unfortunately the package manager implementation - // requires any type that is reconciled as a package (or revision) to - // satisfy interfaces that involve returning v1 types. - v1 "github.com/crossplane/crossplane/apis/pkg/v1" ) // +kubebuilder:object:root=true // +genclient // +genclient:nonNamespaced -// Function is the CRD type for a request to deploy a long-running Function. +// A Function installs an OCI compatible Crossplane package, extending +// Crossplane with support for a new kind of composition function. +// +// Read the Crossplane documentation for +// [more information about Functions](https://docs.crossplane.io/latest/concepts/composition-functions). // +kubebuilder:subresource:status // +kubebuilder:storageversion // +kubebuilder:printcolumn:name="INSTALLED",type="string",JSONPath=".status.conditions[?(@.type=='Installed')].status" @@ -52,15 +48,15 @@ type Function struct { // FunctionSpec specifies the configuration of a Function. type FunctionSpec struct { - v1.PackageSpec `json:",inline"` + PackageSpec `json:",inline"` - v1.PackageRuntimeSpec `json:",inline"` + PackageRuntimeSpec `json:",inline"` } // FunctionStatus represents the observed state of a Function. type FunctionStatus struct { xpv1.ConditionedStatus `json:",inline"` - v1.PackageStatus `json:",inline"` + PackageStatus `json:",inline"` } // +kubebuilder:object:root=true @@ -74,15 +70,19 @@ type FunctionList struct { // FunctionRevisionSpec specifies configuration for a FunctionRevision. 
type FunctionRevisionSpec struct { - v1.PackageRevisionSpec `json:",inline"` - v1.PackageRevisionRuntimeSpec `json:",inline"` + PackageRevisionSpec `json:",inline"` + PackageRevisionRuntimeSpec `json:",inline"` } // +kubebuilder:object:root=true // +genclient // +genclient:nonNamespaced -// A FunctionRevision that has been added to Crossplane. +// A FunctionRevision represents a revision of a Function. Crossplane +// creates new revisions when there are changes to the Function. +// +// Crossplane creates and manages FunctionRevisions. Don't directly edit +// FunctionRevisions. // +kubebuilder:subresource:status // +kubebuilder:storageversion // +kubebuilder:printcolumn:name="HEALTHY",type="string",JSONPath=".status.conditions[?(@.type=='Healthy')].status" @@ -103,7 +103,7 @@ type FunctionRevision struct { // FunctionRevisionStatus represents the observed state of a FunctionRevision. type FunctionRevisionStatus struct { - v1.PackageRevisionStatus `json:",inline"` + PackageRevisionStatus `json:",inline"` // Endpoint is the gRPC endpoint where Crossplane will send // RunFunctionRequests. diff --git a/apis/pkg/v1/interfaces.go b/apis/pkg/v1/interfaces.go index 9a1c55638..3fcc4ab08 100644 --- a/apis/pkg/v1/interfaces.go +++ b/apis/pkg/v1/interfaces.go @@ -63,7 +63,7 @@ func RefNames(refs []corev1.LocalObjectReference) []string { // PackageWithRuntime is the interface satisfied by packages with runtime types. // +k8s:deepcopy-gen=false -type PackageWithRuntime interface { +type PackageWithRuntime interface { //nolint:interfacebloat // TODO(negz): Could this be composed of smaller interfaces? Package GetControllerConfigRef() *ControllerConfigReference @@ -79,7 +79,7 @@ type PackageWithRuntime interface { // Package is the interface satisfied by package types. // +k8s:deepcopy-gen=false -type Package interface { +type Package interface { //nolint:interfacebloat // TODO(negz): Could we break this up into smaller, composable interfaces? 
resource.Object resource.Conditioned @@ -110,7 +110,7 @@ type Package interface { SetCurrentIdentifier(r string) GetSkipDependencyResolution() *bool - SetSkipDependencyResolution(*bool) + SetSkipDependencyResolution(skip *bool) GetCommonLabels() map[string]string SetCommonLabels(l map[string]string) @@ -126,7 +126,7 @@ func (p *Provider) SetConditions(c ...xpv1.Condition) { p.Status.SetConditions(c...) } -// CleanConditions removes all conditions +// CleanConditions removes all conditions. func (p *Provider) CleanConditions() { p.Status.Conditions = []xpv1.Condition{} } @@ -271,7 +271,7 @@ func (p *Configuration) SetConditions(c ...xpv1.Condition) { p.Status.SetConditions(c...) } -// CleanConditions removes all conditions +// CleanConditions removes all conditions. func (p *Configuration) CleanConditions() { p.Status.Conditions = []xpv1.Condition{} } @@ -379,7 +379,7 @@ func (p *Configuration) SetCommonLabels(l map[string]string) { // PackageRevisionWithRuntime is the interface satisfied by revision of packages // with runtime types. // +k8s:deepcopy-gen=false -type PackageRevisionWithRuntime interface { +type PackageRevisionWithRuntime interface { //nolint:interfacebloat // TODO(negz): Could this be composed of smaller interfaces? PackageRevision GetControllerConfigRef() *ControllerConfigReference @@ -397,7 +397,7 @@ type PackageRevisionWithRuntime interface { // PackageRevision is the interface satisfied by package revision types. // +k8s:deepcopy-gen=false -type PackageRevision interface { +type PackageRevision interface { //nolint:interfacebloat // TODO(negz): Could we break this up into smaller, composable interfaces? 
resource.Object resource.Conditioned @@ -425,7 +425,7 @@ type PackageRevision interface { SetRevision(r int64) GetSkipDependencyResolution() *bool - SetSkipDependencyResolution(*bool) + SetSkipDependencyResolution(skip *bool) GetDependencyStatus() (found, installed, invalid int64) SetDependencyStatus(found, installed, invalid int64) @@ -444,7 +444,7 @@ func (p *ProviderRevision) SetConditions(c ...xpv1.Condition) { p.Status.SetConditions(c...) } -// CleanConditions removes all conditions +// CleanConditions removes all conditions. func (p *ProviderRevision) CleanConditions() { p.Status.Conditions = []xpv1.Condition{} } @@ -601,7 +601,7 @@ func (p *ConfigurationRevision) SetConditions(c ...xpv1.Condition) { p.Status.SetConditions(c...) } -// CleanConditions removes all conditions +// CleanConditions removes all conditions. func (p *ConfigurationRevision) CleanConditions() { p.Status.Conditions = []xpv1.Condition{} } @@ -726,7 +726,6 @@ type PackageRevisionList interface { func (p *ProviderRevisionList) GetRevisions() []PackageRevision { prs := make([]PackageRevision, len(p.Items)) for i, r := range p.Items { - r := r // Pin range variable so we can take its address. prs[i] = &r } return prs @@ -736,7 +735,6 @@ func (p *ProviderRevisionList) GetRevisions() []PackageRevision { func (p *ConfigurationRevisionList) GetRevisions() []PackageRevision { prs := make([]PackageRevision, len(p.Items)) for i, r := range p.Items { - r := r // Pin range variable so we can take its address. prs[i] = &r } return prs @@ -762,3 +760,312 @@ func GetSecretNameWithSuffix(name, suffix string) *string { return &s } + +// GetCondition of this Function. +func (f *Function) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return f.Status.GetCondition(ct) +} + +// SetConditions of this Function. +func (f *Function) SetConditions(c ...xpv1.Condition) { + f.Status.SetConditions(c...) +} + +// CleanConditions removes all conditions. 
+func (f *Function) CleanConditions() { + f.Status.Conditions = []xpv1.Condition{} +} + +// GetSource of this Function. +func (f *Function) GetSource() string { + return f.Spec.Package +} + +// SetSource of this Function. +func (f *Function) SetSource(s string) { + f.Spec.Package = s +} + +// GetActivationPolicy of this Function. +func (f *Function) GetActivationPolicy() *RevisionActivationPolicy { + return f.Spec.RevisionActivationPolicy +} + +// SetActivationPolicy of this Function. +func (f *Function) SetActivationPolicy(a *RevisionActivationPolicy) { + f.Spec.RevisionActivationPolicy = a +} + +// GetPackagePullSecrets of this Function. +func (f *Function) GetPackagePullSecrets() []corev1.LocalObjectReference { + return f.Spec.PackagePullSecrets +} + +// SetPackagePullSecrets of this Function. +func (f *Function) SetPackagePullSecrets(s []corev1.LocalObjectReference) { + f.Spec.PackagePullSecrets = s +} + +// GetPackagePullPolicy of this Function. +func (f *Function) GetPackagePullPolicy() *corev1.PullPolicy { + return f.Spec.PackagePullPolicy +} + +// SetPackagePullPolicy of this Function. +func (f *Function) SetPackagePullPolicy(i *corev1.PullPolicy) { + f.Spec.PackagePullPolicy = i +} + +// GetRevisionHistoryLimit of this Function. +func (f *Function) GetRevisionHistoryLimit() *int64 { + return f.Spec.RevisionHistoryLimit +} + +// SetRevisionHistoryLimit of this Function. +func (f *Function) SetRevisionHistoryLimit(l *int64) { + f.Spec.RevisionHistoryLimit = l +} + +// GetIgnoreCrossplaneConstraints of this Function. +func (f *Function) GetIgnoreCrossplaneConstraints() *bool { + return f.Spec.IgnoreCrossplaneConstraints +} + +// SetIgnoreCrossplaneConstraints of this Function. +func (f *Function) SetIgnoreCrossplaneConstraints(b *bool) { + f.Spec.IgnoreCrossplaneConstraints = b +} + +// GetControllerConfigRef of this Function. 
+func (f *Function) GetControllerConfigRef() *ControllerConfigReference { + return nil +} + +// SetControllerConfigRef of this Function. +func (f *Function) SetControllerConfigRef(*ControllerConfigReference) {} + +// GetRuntimeConfigRef of this Function. +func (f *Function) GetRuntimeConfigRef() *RuntimeConfigReference { + return f.Spec.RuntimeConfigReference +} + +// SetRuntimeConfigRef of this Function. +func (f *Function) SetRuntimeConfigRef(r *RuntimeConfigReference) { + f.Spec.RuntimeConfigReference = r +} + +// GetCurrentRevision of this Function. +func (f *Function) GetCurrentRevision() string { + return f.Status.CurrentRevision +} + +// SetCurrentRevision of this Function. +func (f *Function) SetCurrentRevision(s string) { + f.Status.CurrentRevision = s +} + +// GetSkipDependencyResolution of this Function. +func (f *Function) GetSkipDependencyResolution() *bool { + return f.Spec.SkipDependencyResolution +} + +// SetSkipDependencyResolution of this Function. +func (f *Function) SetSkipDependencyResolution(b *bool) { + f.Spec.SkipDependencyResolution = b +} + +// GetCurrentIdentifier of this Function. +func (f *Function) GetCurrentIdentifier() string { + return f.Status.CurrentIdentifier +} + +// SetCurrentIdentifier of this Function. +func (f *Function) SetCurrentIdentifier(s string) { + f.Status.CurrentIdentifier = s +} + +// GetCommonLabels of this Function. +func (f *Function) GetCommonLabels() map[string]string { + return f.Spec.CommonLabels +} + +// SetCommonLabels of this Function. +func (f *Function) SetCommonLabels(l map[string]string) { + f.Spec.CommonLabels = l +} + +// GetTLSServerSecretName of this Function. +func (f *Function) GetTLSServerSecretName() *string { + return GetSecretNameWithSuffix(f.GetName(), TLSServerSecretNameSuffix) +} + +// GetTLSClientSecretName of this Function. +func (f *Function) GetTLSClientSecretName() *string { + return nil +} + +// GetCondition of this FunctionRevision. 
+func (r *FunctionRevision) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return r.Status.GetCondition(ct) +} + +// SetConditions of this FunctionRevision. +func (r *FunctionRevision) SetConditions(c ...xpv1.Condition) { + r.Status.SetConditions(c...) +} + +// CleanConditions removes all conditions. +func (r *FunctionRevision) CleanConditions() { + r.Status.Conditions = []xpv1.Condition{} +} + +// GetObjects of this FunctionRevision. +func (r *FunctionRevision) GetObjects() []xpv1.TypedReference { + return r.Status.ObjectRefs +} + +// SetObjects of this FunctionRevision. +func (r *FunctionRevision) SetObjects(c []xpv1.TypedReference) { + r.Status.ObjectRefs = c +} + +// GetSource of this FunctionRevision. +func (r *FunctionRevision) GetSource() string { + return r.Spec.Package +} + +// SetSource of this FunctionRevision. +func (r *FunctionRevision) SetSource(s string) { + r.Spec.Package = s +} + +// GetPackagePullSecrets of this FunctionRevision. +func (r *FunctionRevision) GetPackagePullSecrets() []corev1.LocalObjectReference { + return r.Spec.PackagePullSecrets +} + +// SetPackagePullSecrets of this FunctionRevision. +func (r *FunctionRevision) SetPackagePullSecrets(s []corev1.LocalObjectReference) { + r.Spec.PackagePullSecrets = s +} + +// GetPackagePullPolicy of this FunctionRevision. +func (r *FunctionRevision) GetPackagePullPolicy() *corev1.PullPolicy { + return r.Spec.PackagePullPolicy +} + +// SetPackagePullPolicy of this FunctionRevision. +func (r *FunctionRevision) SetPackagePullPolicy(i *corev1.PullPolicy) { + r.Spec.PackagePullPolicy = i +} + +// GetDesiredState of this FunctionRevision. +func (r *FunctionRevision) GetDesiredState() PackageRevisionDesiredState { + return r.Spec.DesiredState +} + +// SetDesiredState of this FunctionRevision. +func (r *FunctionRevision) SetDesiredState(s PackageRevisionDesiredState) { + r.Spec.DesiredState = s +} + +// GetRevision of this FunctionRevision. 
+func (r *FunctionRevision) GetRevision() int64 { + return r.Spec.Revision +} + +// SetRevision of this FunctionRevision. +func (r *FunctionRevision) SetRevision(rev int64) { + r.Spec.Revision = rev +} + +// GetDependencyStatus of this FunctionRevision. +func (r *FunctionRevision) GetDependencyStatus() (found, installed, invalid int64) { + return r.Status.FoundDependencies, r.Status.InstalledDependencies, r.Status.InvalidDependencies +} + +// SetDependencyStatus of this FunctionRevision. +func (r *FunctionRevision) SetDependencyStatus(found, installed, invalid int64) { + r.Status.FoundDependencies = found + r.Status.InstalledDependencies = installed + r.Status.InvalidDependencies = invalid +} + +// GetIgnoreCrossplaneConstraints of this FunctionRevision. +func (r *FunctionRevision) GetIgnoreCrossplaneConstraints() *bool { + return r.Spec.IgnoreCrossplaneConstraints +} + +// SetIgnoreCrossplaneConstraints of this FunctionRevision. +func (r *FunctionRevision) SetIgnoreCrossplaneConstraints(b *bool) { + r.Spec.IgnoreCrossplaneConstraints = b +} + +// GetControllerConfigRef of this FunctionRevision. +func (r *FunctionRevision) GetControllerConfigRef() *ControllerConfigReference { + return r.Spec.ControllerConfigReference +} + +// SetControllerConfigRef of this FunctionRevision. +func (r *FunctionRevision) SetControllerConfigRef(ref *ControllerConfigReference) { + r.Spec.ControllerConfigReference = ref +} + +// GetRuntimeConfigRef of this FunctionRevision. +func (r *FunctionRevision) GetRuntimeConfigRef() *RuntimeConfigReference { + return r.Spec.RuntimeConfigReference +} + +// SetRuntimeConfigRef of this FunctionRevision. +func (r *FunctionRevision) SetRuntimeConfigRef(ref *RuntimeConfigReference) { + r.Spec.RuntimeConfigReference = ref +} + +// GetSkipDependencyResolution of this FunctionRevision. +func (r *FunctionRevision) GetSkipDependencyResolution() *bool { + return r.Spec.SkipDependencyResolution +} + +// SetSkipDependencyResolution of this FunctionRevision. 
+func (r *FunctionRevision) SetSkipDependencyResolution(b *bool) { + r.Spec.SkipDependencyResolution = b +} + +// GetTLSServerSecretName of this FunctionRevision. +func (r *FunctionRevision) GetTLSServerSecretName() *string { + return r.Spec.TLSServerSecretName +} + +// SetTLSServerSecretName of this FunctionRevision. +func (r *FunctionRevision) SetTLSServerSecretName(s *string) { + r.Spec.TLSServerSecretName = s +} + +// GetTLSClientSecretName of this FunctionRevision. +func (r *FunctionRevision) GetTLSClientSecretName() *string { + return r.Spec.TLSClientSecretName +} + +// SetTLSClientSecretName of this FunctionRevision. +func (r *FunctionRevision) SetTLSClientSecretName(s *string) { + r.Spec.TLSClientSecretName = s +} + +// GetCommonLabels of this FunctionRevision. +func (r *FunctionRevision) GetCommonLabels() map[string]string { + return r.Spec.CommonLabels +} + +// SetCommonLabels of this FunctionRevision. +func (r *FunctionRevision) SetCommonLabels(l map[string]string) { + r.Spec.CommonLabels = l +} + +// GetRevisions of this ConfigurationRevisionList. +func (p *FunctionRevisionList) GetRevisions() []PackageRevision { + prs := make([]PackageRevision, len(p.Items)) + for i, r := range p.Items { + prs[i] = &r + } + return prs +} diff --git a/apis/pkg/v1/interfaces_test.go b/apis/pkg/v1/interfaces_test.go index f6c2e3ae7..f078fbc9d 100644 --- a/apis/pkg/v1/interfaces_test.go +++ b/apis/pkg/v1/interfaces_test.go @@ -16,11 +16,20 @@ limitations under the License. 
package v1 -var _ Package = &Provider{} -var _ Package = &Configuration{} +var ( + _ Package = &Provider{} + _ Package = &Configuration{} + _ Package = &Function{} +) -var _ PackageRevision = &ProviderRevision{} -var _ PackageRevision = &ConfigurationRevision{} +var ( + _ PackageRevision = &ProviderRevision{} + _ PackageRevision = &ConfigurationRevision{} + _ PackageRevision = &FunctionRevision{} +) -var _ PackageRevisionList = &ProviderRevisionList{} -var _ PackageRevisionList = &ConfigurationRevisionList{} +var ( + _ PackageRevisionList = &ProviderRevisionList{} + _ PackageRevisionList = &ConfigurationRevisionList{} + _ PackageRevisionList = &FunctionRevisionList{} +) diff --git a/apis/pkg/v1/package_types.go b/apis/pkg/v1/package_types.go index 070080a74..c74c03cdc 100644 --- a/apis/pkg/v1/package_types.go +++ b/apis/pkg/v1/package_types.go @@ -70,7 +70,7 @@ type PackageSpec struct { // Map of string keys and values that can be used to organize and categorize // (scope and select) objects. May match selectors of replication controllers // and services. - // More info: http://kubernetes.io/docs/user-guide/labels + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional CommonLabels map[string]string `json:"commonLabels,omitempty"` } diff --git a/apis/pkg/v1/provider_types.go b/apis/pkg/v1/provider_types.go index cd043d972..48dbf699a 100644 --- a/apis/pkg/v1/provider_types.go +++ b/apis/pkg/v1/provider_types.go @@ -26,7 +26,11 @@ import ( // +genclient // +genclient:nonNamespaced -// Provider is the CRD type for a request to add a provider to Crossplane. +// A Provider installs an OCI compatible Crossplane package, extending +// Crossplane with support for new kinds of managed resources. +// +// Read the Crossplane documentation for +// [more information about Providers](https://docs.crossplane.io/latest/concepts/providers). 
// +kubebuilder:subresource:status // +kubebuilder:storageversion // +kubebuilder:printcolumn:name="INSTALLED",type="string",JSONPath=".status.conditions[?(@.type=='Installed')].status" @@ -74,7 +78,11 @@ type ProviderRevisionSpec struct { // +genclient // +genclient:nonNamespaced -// A ProviderRevision that has been added to Crossplane. +// A ProviderRevision represents a revision of a Provider. Crossplane +// creates new revisions when there are changes to a Provider. +// +// Crossplane creates and manages ProviderRevisions. Don't directly edit +// ProviderRevisions. // +kubebuilder:subresource:status // +kubebuilder:storageversion // +kubebuilder:printcolumn:name="HEALTHY",type="string",JSONPath=".status.conditions[?(@.type=='Healthy')].status" diff --git a/apis/pkg/v1/register.go b/apis/pkg/v1/register.go index 56800528f..2094c8949 100644 --- a/apis/pkg/v1/register.go +++ b/apis/pkg/v1/register.go @@ -30,13 +30,13 @@ const ( ) var ( - // SchemeGroupVersion is group version used to register these objects + // SchemeGroupVersion is group version used to register these objects. SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version} - // SchemeBuilder is used to add go types to the GroupVersionKind scheme + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} - // AddToScheme adds all registered types to the scheme + // AddToScheme adds all registered types to the scheme. AddToScheme = SchemeBuilder.AddToScheme ) @@ -72,9 +72,27 @@ var ( ProviderRevisionGroupVersionKind = SchemeGroupVersion.WithKind(ProviderRevisionKind) ) +// Function type metadata. +var ( + FunctionKind = reflect.TypeOf(Function{}).Name() + FunctionGroupKind = schema.GroupKind{Group: Group, Kind: FunctionKind}.String() + FunctionKindAPIVersion = FunctionKind + "." 
+ SchemeGroupVersion.String() + FunctionGroupVersionKind = SchemeGroupVersion.WithKind(FunctionKind) +) + +// FunctionRevision type metadata. +var ( + FunctionRevisionKind = reflect.TypeOf(FunctionRevision{}).Name() + FunctionRevisionGroupKind = schema.GroupKind{Group: Group, Kind: FunctionRevisionKind}.String() + FunctionRevisionKindAPIVersion = FunctionRevisionKind + "." + SchemeGroupVersion.String() + FunctionRevisionGroupVersionKind = SchemeGroupVersion.WithKind(FunctionRevisionKind) +) + func init() { SchemeBuilder.Register(&Configuration{}, &ConfigurationList{}) SchemeBuilder.Register(&ConfigurationRevision{}, &ConfigurationRevisionList{}) SchemeBuilder.Register(&Provider{}, &ProviderList{}) SchemeBuilder.Register(&ProviderRevision{}, &ProviderRevisionList{}) + SchemeBuilder.Register(&Function{}, &FunctionList{}) + SchemeBuilder.Register(&FunctionRevision{}, &FunctionRevisionList{}) } diff --git a/apis/pkg/v1/revision_types.go b/apis/pkg/v1/revision_types.go index bde9a37b7..c48f38f53 100644 --- a/apis/pkg/v1/revision_types.go +++ b/apis/pkg/v1/revision_types.go @@ -78,7 +78,7 @@ type PackageRevisionSpec struct { // Map of string keys and values that can be used to organize and categorize // (scope and select) objects. May match selectors of replication controllers // and services. - // More info: http://kubernetes.io/docs/user-guide/labels + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional CommonLabels map[string]string `json:"commonLabels,omitempty"` } diff --git a/apis/pkg/v1/zz_generated.deepcopy.go b/apis/pkg/v1/zz_generated.deepcopy.go index b70272c9c..a9a86a0fb 100644 --- a/apis/pkg/v1/zz_generated.deepcopy.go +++ b/apis/pkg/v1/zz_generated.deepcopy.go @@ -208,6 +208,191 @@ func (in *ControllerReference) DeepCopy() *ControllerReference { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Function) DeepCopyInto(out *Function) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Function. +func (in *Function) DeepCopy() *Function { + if in == nil { + return nil + } + out := new(Function) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Function) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionList) DeepCopyInto(out *FunctionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Function, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionList. +func (in *FunctionList) DeepCopy() *FunctionList { + if in == nil { + return nil + } + out := new(FunctionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FunctionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FunctionRevision) DeepCopyInto(out *FunctionRevision) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionRevision. +func (in *FunctionRevision) DeepCopy() *FunctionRevision { + if in == nil { + return nil + } + out := new(FunctionRevision) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FunctionRevision) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionRevisionList) DeepCopyInto(out *FunctionRevisionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FunctionRevision, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionRevisionList. +func (in *FunctionRevisionList) DeepCopy() *FunctionRevisionList { + if in == nil { + return nil + } + out := new(FunctionRevisionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FunctionRevisionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FunctionRevisionSpec) DeepCopyInto(out *FunctionRevisionSpec) { + *out = *in + in.PackageRevisionSpec.DeepCopyInto(&out.PackageRevisionSpec) + in.PackageRevisionRuntimeSpec.DeepCopyInto(&out.PackageRevisionRuntimeSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionRevisionSpec. +func (in *FunctionRevisionSpec) DeepCopy() *FunctionRevisionSpec { + if in == nil { + return nil + } + out := new(FunctionRevisionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionRevisionStatus) DeepCopyInto(out *FunctionRevisionStatus) { + *out = *in + in.PackageRevisionStatus.DeepCopyInto(&out.PackageRevisionStatus) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionRevisionStatus. +func (in *FunctionRevisionStatus) DeepCopy() *FunctionRevisionStatus { + if in == nil { + return nil + } + out := new(FunctionRevisionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FunctionSpec) DeepCopyInto(out *FunctionSpec) { + *out = *in + in.PackageSpec.DeepCopyInto(&out.PackageSpec) + in.PackageRuntimeSpec.DeepCopyInto(&out.PackageRuntimeSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionSpec. +func (in *FunctionSpec) DeepCopy() *FunctionSpec { + if in == nil { + return nil + } + out := new(FunctionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FunctionStatus) DeepCopyInto(out *FunctionStatus) { + *out = *in + in.ConditionedStatus.DeepCopyInto(&out.ConditionedStatus) + out.PackageStatus = in.PackageStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionStatus. +func (in *FunctionStatus) DeepCopy() *FunctionStatus { + if in == nil { + return nil + } + out := new(FunctionStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PackageRevisionRuntimeSpec) DeepCopyInto(out *PackageRevisionRuntimeSpec) { *out = *in diff --git a/apis/pkg/v1alpha1/config.go b/apis/pkg/v1alpha1/config.go index 0a82c8b62..2bab1086e 100644 --- a/apis/pkg/v1alpha1/config.go +++ b/apis/pkg/v1alpha1/config.go @@ -107,7 +107,7 @@ type ControllerConfigSpec struct { // to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an // empty definition that uses the default runtime handler. - // More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md + // More info: https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/585-runtime-class/README.md // This is a beta feature as of Kubernetes v1.14. // +optional RuntimeClassName *string `json:"runtimeClassName,omitempty"` @@ -157,7 +157,7 @@ type PodObjectMeta struct { // Annotations is an unstructured key value map stored with a resource that may be // set by external tools to store and retrieve arbitrary metadata. They are not // queryable and should be preserved when modifying objects. 
- // More info: http://kubernetes.io/docs/user-guide/annotations + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ // +optional Annotations map[string]string `json:"annotations,omitempty"` @@ -166,7 +166,7 @@ type PodObjectMeta struct { // labels on the pod, not the pod selector. Labels will be merged // with internal labels used by crossplane, and labels with a // crossplane.io key might be overwritten. - // More info: http://kubernetes.io/docs/user-guide/labels + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional Labels map[string]string `json:"labels,omitempty"` } @@ -175,10 +175,14 @@ type PodObjectMeta struct { // +genclient // +genclient:nonNamespaced -// ControllerConfig is the CRD type for a packaged controller configuration. -// Deprecated: This API is replaced by DeploymentRuntimeConfig, and is scheduled -// to be removed in a future release. See the design doc for more details: -// https://github.com/crossplane/crossplane/blob/11bbe13ea3604928cc4e24e8d0d18f3f5f7e847c/design/one-pager-package-runtime-config.md +// A ControllerConfig applies settings to controllers like Provider pods. +// Deprecated: Use the +// [DeploymentRuntimeConfig](https://docs.crossplane.io/latest/concepts/providers#runtime-configuration) +// instead. +// +// Read the +// [Package Runtime Configuration](https://github.com/crossplane/crossplane/blob/11bbe13ea3604928cc4e24e8d0d18f3f5f7e847c/design/one-pager-package-runtime-config.md) +// design document for more details. // +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" // +kubebuilder:resource:scope=Cluster // +kubebuilder:deprecatedversion:warning="ControllerConfig.pkg.crossplane.io/v1alpha1 is deprecated. Use DeploymentRuntimeConfig from pkg.crossplane.io/v1beta1 instead." 
diff --git a/apis/pkg/v1alpha1/register.go b/apis/pkg/v1alpha1/register.go index 63509f215..f18ca3e75 100644 --- a/apis/pkg/v1alpha1/register.go +++ b/apis/pkg/v1alpha1/register.go @@ -30,13 +30,13 @@ const ( ) var ( - // SchemeGroupVersion is group version used to register these objects + // SchemeGroupVersion is group version used to register these objects. SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version} - // SchemeBuilder is used to add go types to the GroupVersionKind scheme + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} - // AddToScheme adds all registered types to the scheme + // AddToScheme adds all registered types to the scheme. AddToScheme = SchemeBuilder.AddToScheme ) diff --git a/apis/pkg/v1beta1/deployment_runtime_config_types.go b/apis/pkg/v1beta1/deployment_runtime_config_types.go index a1aeca774..b1f33fe11 100644 --- a/apis/pkg/v1beta1/deployment_runtime_config_types.go +++ b/apis/pkg/v1beta1/deployment_runtime_config_types.go @@ -30,7 +30,7 @@ type ObjectMeta struct { // Annotations is an unstructured key value map stored with a resource that // may be set by external tools to store and retrieve arbitrary metadata. // They are not queryable and should be preserved when modifying objects. - // More info: http://kubernetes.io/docs/user-guide/annotations + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ // +optional Annotations map[string]string `json:"annotations,omitempty"` @@ -38,7 +38,7 @@ type ObjectMeta struct { // (scope and select) objects. Labels will be merged with internal labels // used by crossplane, and labels with a crossplane.io key might be // overwritten. 
- // More info: http://kubernetes.io/docs/user-guide/labels + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // +optional Labels map[string]string `json:"labels,omitempty"` } @@ -87,10 +87,11 @@ type DeploymentRuntimeConfigSpec struct { // +genclient // +genclient:nonNamespaced -// A DeploymentRuntimeConfig is used to configure the package runtime when -// the package uses a runtime and the package manager is running with -// --package-runtime=Deployment (the default). See the following design doc for -// more details:https://github.com/crossplane/crossplane/blob/91edeae3fcac96c6c8a1759a723981eea4bb77e4/design/one-pager-package-runtime-config.md#migration-from-controllerconfig +// The DeploymentRuntimeConfig provides settings for the Kubernetes Deployment +// of a Provider or composition function package. +// +// Read the Crossplane documentation for +// [more information about DeploymentRuntimeConfigs](https://docs.crossplane.io/latest/concepts/providers/#runtime-configuration). // +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" // +kubebuilder:resource:scope=Cluster,categories={crossplane} type DeploymentRuntimeConfig struct { diff --git a/apis/pkg/v1beta1/function_interfaces.go b/apis/pkg/v1beta1/function_interfaces.go deleted file mode 100644 index 444c49d78..000000000 --- a/apis/pkg/v1beta1/function_interfaces.go +++ /dev/null @@ -1,335 +0,0 @@ -/* -Copyright 2023 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - corev1 "k8s.io/api/core/v1" - - xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" - - v1 "github.com/crossplane/crossplane/apis/pkg/v1" -) - -// GetCondition of this Function. -func (f *Function) GetCondition(ct xpv1.ConditionType) xpv1.Condition { - return f.Status.GetCondition(ct) -} - -// SetConditions of this Function. -func (f *Function) SetConditions(c ...xpv1.Condition) { - f.Status.SetConditions(c...) -} - -// CleanConditions removes all conditions -func (f *Function) CleanConditions() { - f.Status.Conditions = []xpv1.Condition{} -} - -// GetSource of this Function. -func (f *Function) GetSource() string { - return f.Spec.Package -} - -// SetSource of this Function. -func (f *Function) SetSource(s string) { - f.Spec.Package = s -} - -// GetActivationPolicy of this Function. -func (f *Function) GetActivationPolicy() *v1.RevisionActivationPolicy { - return f.Spec.RevisionActivationPolicy -} - -// SetActivationPolicy of this Function. -func (f *Function) SetActivationPolicy(a *v1.RevisionActivationPolicy) { - f.Spec.RevisionActivationPolicy = a -} - -// GetPackagePullSecrets of this Function. -func (f *Function) GetPackagePullSecrets() []corev1.LocalObjectReference { - return f.Spec.PackagePullSecrets -} - -// SetPackagePullSecrets of this Function. -func (f *Function) SetPackagePullSecrets(s []corev1.LocalObjectReference) { - f.Spec.PackagePullSecrets = s -} - -// GetPackagePullPolicy of this Function. -func (f *Function) GetPackagePullPolicy() *corev1.PullPolicy { - return f.Spec.PackagePullPolicy -} - -// SetPackagePullPolicy of this Function. -func (f *Function) SetPackagePullPolicy(i *corev1.PullPolicy) { - f.Spec.PackagePullPolicy = i -} - -// GetRevisionHistoryLimit of this Function. 
-func (f *Function) GetRevisionHistoryLimit() *int64 { - return f.Spec.RevisionHistoryLimit -} - -// SetRevisionHistoryLimit of this Function. -func (f *Function) SetRevisionHistoryLimit(l *int64) { - f.Spec.RevisionHistoryLimit = l -} - -// GetIgnoreCrossplaneConstraints of this Function. -func (f *Function) GetIgnoreCrossplaneConstraints() *bool { - return f.Spec.IgnoreCrossplaneConstraints -} - -// SetIgnoreCrossplaneConstraints of this Function. -func (f *Function) SetIgnoreCrossplaneConstraints(b *bool) { - f.Spec.IgnoreCrossplaneConstraints = b -} - -// GetControllerConfigRef of this Function. -func (f *Function) GetControllerConfigRef() *v1.ControllerConfigReference { - return nil -} - -// SetControllerConfigRef of this Function. -func (f *Function) SetControllerConfigRef(*v1.ControllerConfigReference) {} - -// GetRuntimeConfigRef of this Function. -func (f *Function) GetRuntimeConfigRef() *v1.RuntimeConfigReference { - return f.Spec.RuntimeConfigReference -} - -// SetRuntimeConfigRef of this Function. -func (f *Function) SetRuntimeConfigRef(r *v1.RuntimeConfigReference) { - f.Spec.RuntimeConfigReference = r -} - -// GetCurrentRevision of this Function. -func (f *Function) GetCurrentRevision() string { - return f.Status.CurrentRevision -} - -// SetCurrentRevision of this Function. -func (f *Function) SetCurrentRevision(s string) { - f.Status.CurrentRevision = s -} - -// GetSkipDependencyResolution of this Function. -func (f *Function) GetSkipDependencyResolution() *bool { - return f.Spec.SkipDependencyResolution -} - -// SetSkipDependencyResolution of this Function. -func (f *Function) SetSkipDependencyResolution(b *bool) { - f.Spec.SkipDependencyResolution = b -} - -// GetCurrentIdentifier of this Function. -func (f *Function) GetCurrentIdentifier() string { - return f.Status.CurrentIdentifier -} - -// SetCurrentIdentifier of this Function. 
-func (f *Function) SetCurrentIdentifier(s string) { - f.Status.CurrentIdentifier = s -} - -// GetCommonLabels of this Function. -func (f *Function) GetCommonLabels() map[string]string { - return f.Spec.CommonLabels -} - -// SetCommonLabels of this Function. -func (f *Function) SetCommonLabels(l map[string]string) { - f.Spec.CommonLabels = l -} - -// GetTLSServerSecretName of this Function. -func (f *Function) GetTLSServerSecretName() *string { - return v1.GetSecretNameWithSuffix(f.GetName(), v1.TLSServerSecretNameSuffix) -} - -// GetTLSClientSecretName of this Function. -func (f *Function) GetTLSClientSecretName() *string { - return nil -} - -// GetCondition of this FunctionRevision. -func (r *FunctionRevision) GetCondition(ct xpv1.ConditionType) xpv1.Condition { - return r.Status.GetCondition(ct) -} - -// SetConditions of this FunctionRevision. -func (r *FunctionRevision) SetConditions(c ...xpv1.Condition) { - r.Status.SetConditions(c...) -} - -// CleanConditions removes all conditions -func (r *FunctionRevision) CleanConditions() { - r.Status.Conditions = []xpv1.Condition{} -} - -// GetObjects of this FunctionRevision. -func (r *FunctionRevision) GetObjects() []xpv1.TypedReference { - return r.Status.ObjectRefs -} - -// SetObjects of this FunctionRevision. -func (r *FunctionRevision) SetObjects(c []xpv1.TypedReference) { - r.Status.ObjectRefs = c -} - -// GetSource of this FunctionRevision. -func (r *FunctionRevision) GetSource() string { - return r.Spec.Package -} - -// SetSource of this FunctionRevision. -func (r *FunctionRevision) SetSource(s string) { - r.Spec.Package = s -} - -// GetPackagePullSecrets of this FunctionRevision. -func (r *FunctionRevision) GetPackagePullSecrets() []corev1.LocalObjectReference { - return r.Spec.PackagePullSecrets -} - -// SetPackagePullSecrets of this FunctionRevision. 
-func (r *FunctionRevision) SetPackagePullSecrets(s []corev1.LocalObjectReference) { - r.Spec.PackagePullSecrets = s -} - -// GetPackagePullPolicy of this FunctionRevision. -func (r *FunctionRevision) GetPackagePullPolicy() *corev1.PullPolicy { - return r.Spec.PackagePullPolicy -} - -// SetPackagePullPolicy of this FunctionRevision. -func (r *FunctionRevision) SetPackagePullPolicy(i *corev1.PullPolicy) { - r.Spec.PackagePullPolicy = i -} - -// GetDesiredState of this FunctionRevision. -func (r *FunctionRevision) GetDesiredState() v1.PackageRevisionDesiredState { - return r.Spec.DesiredState -} - -// SetDesiredState of this FunctionRevision. -func (r *FunctionRevision) SetDesiredState(s v1.PackageRevisionDesiredState) { - r.Spec.DesiredState = s -} - -// GetRevision of this FunctionRevision. -func (r *FunctionRevision) GetRevision() int64 { - return r.Spec.Revision -} - -// SetRevision of this FunctionRevision. -func (r *FunctionRevision) SetRevision(rev int64) { - r.Spec.Revision = rev -} - -// GetDependencyStatus of this v. -func (r *FunctionRevision) GetDependencyStatus() (found, installed, invalid int64) { - return r.Status.FoundDependencies, r.Status.InstalledDependencies, r.Status.InvalidDependencies -} - -// SetDependencyStatus of this FunctionRevision. -func (r *FunctionRevision) SetDependencyStatus(found, installed, invalid int64) { - r.Status.FoundDependencies = found - r.Status.InstalledDependencies = installed - r.Status.InvalidDependencies = invalid -} - -// GetIgnoreCrossplaneConstraints of this FunctionRevision. -func (r *FunctionRevision) GetIgnoreCrossplaneConstraints() *bool { - return r.Spec.IgnoreCrossplaneConstraints -} - -// SetIgnoreCrossplaneConstraints of this FunctionRevision. -func (r *FunctionRevision) SetIgnoreCrossplaneConstraints(b *bool) { - r.Spec.IgnoreCrossplaneConstraints = b -} - -// GetControllerConfigRef of this FunctionRevision. 
-func (r *FunctionRevision) GetControllerConfigRef() *v1.ControllerConfigReference { - return r.Spec.ControllerConfigReference -} - -// SetControllerConfigRef of this FunctionRevision. -func (r *FunctionRevision) SetControllerConfigRef(ref *v1.ControllerConfigReference) { - r.Spec.ControllerConfigReference = ref -} - -// GetRuntimeConfigRef of this FunctionRevision. -func (r *FunctionRevision) GetRuntimeConfigRef() *v1.RuntimeConfigReference { - return r.Spec.RuntimeConfigReference -} - -// SetRuntimeConfigRef of this FunctionRevision. -func (r *FunctionRevision) SetRuntimeConfigRef(ref *v1.RuntimeConfigReference) { - r.Spec.RuntimeConfigReference = ref -} - -// GetSkipDependencyResolution of this FunctionRevision. -func (r *FunctionRevision) GetSkipDependencyResolution() *bool { - return r.Spec.SkipDependencyResolution -} - -// SetSkipDependencyResolution of this FunctionRevision. -func (r *FunctionRevision) SetSkipDependencyResolution(b *bool) { - r.Spec.SkipDependencyResolution = b -} - -// GetTLSServerSecretName of this FunctionRevision. -func (r *FunctionRevision) GetTLSServerSecretName() *string { - return r.Spec.TLSServerSecretName -} - -// SetTLSServerSecretName of this FunctionRevision. -func (r *FunctionRevision) SetTLSServerSecretName(s *string) { - r.Spec.TLSServerSecretName = s -} - -// GetTLSClientSecretName of this FunctionRevision. -func (r *FunctionRevision) GetTLSClientSecretName() *string { - return r.Spec.TLSClientSecretName -} - -// SetTLSClientSecretName of this FunctionRevision. -func (r *FunctionRevision) SetTLSClientSecretName(s *string) { - r.Spec.TLSClientSecretName = s -} - -// GetCommonLabels of this FunctionRevision. -func (r *FunctionRevision) GetCommonLabels() map[string]string { - return r.Spec.CommonLabels -} - -// SetCommonLabels of this FunctionRevision. -func (r *FunctionRevision) SetCommonLabels(l map[string]string) { - r.Spec.CommonLabels = l -} - -// GetRevisions of this ConfigurationRevisionList. 
-func (p *FunctionRevisionList) GetRevisions() []v1.PackageRevision { - prs := make([]v1.PackageRevision, len(p.Items)) - for i, r := range p.Items { - r := r // Pin range variable so we can take its address. - prs[i] = &r - } - return prs -} diff --git a/apis/pkg/v1beta1/function_interfaces_test.go b/apis/pkg/v1beta1/function_interfaces_test.go deleted file mode 100644 index 783149ee4..000000000 --- a/apis/pkg/v1beta1/function_interfaces_test.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2023 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import v1 "github.com/crossplane/crossplane/apis/pkg/v1" - -var _ v1.Package = &Function{} -var _ v1.PackageRevision = &FunctionRevision{} -var _ v1.PackageRevisionList = &FunctionRevisionList{} diff --git a/apis/pkg/v1beta1/lock.go b/apis/pkg/v1beta1/lock.go index 195fd0273..c6c102146 100644 --- a/apis/pkg/v1beta1/lock.go +++ b/apis/pkg/v1beta1/lock.go @@ -22,8 +22,10 @@ import ( "github.com/crossplane/crossplane/internal/dag" ) -var _ dag.Node = &Dependency{} -var _ dag.Node = &LockPackage{} +var ( + _ dag.Node = &Dependency{} + _ dag.Node = &LockPackage{} +) // A PackageType is a type of package. type PackageType string @@ -58,7 +60,6 @@ type LockPackage struct { func ToNodes(pkgs ...LockPackage) []dag.Node { nodes := make([]dag.Node, len(pkgs)) for i, r := range pkgs { - r := r // Pin range variable so we can take its address. 
nodes[i] = &r } return nodes @@ -73,7 +74,6 @@ func (l *LockPackage) Identifier() string { func (l *LockPackage) Neighbors() []dag.Node { nodes := make([]dag.Node, len(l.Dependencies)) for i, r := range l.Dependencies { - r := r // Pin range variable so we can take its address. nodes[i] = &r } return nodes diff --git a/apis/pkg/v1beta1/register.go b/apis/pkg/v1beta1/register.go index a89fe8936..9c70db5f9 100644 --- a/apis/pkg/v1beta1/register.go +++ b/apis/pkg/v1beta1/register.go @@ -30,13 +30,13 @@ const ( ) var ( - // SchemeGroupVersion is group version used to register these objects + // SchemeGroupVersion is group version used to register these objects. SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version} - // SchemeBuilder is used to add go types to the GroupVersionKind scheme + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} - // AddToScheme adds all registered types to the scheme + // AddToScheme adds all registered types to the scheme. AddToScheme = SchemeBuilder.AddToScheme ) diff --git a/apis/pkg/v1beta1/zz_generated.deepcopy.go b/apis/pkg/v1beta1/zz_generated.deepcopy.go index 449104538..90cfe44fb 100644 --- a/apis/pkg/v1beta1/zz_generated.deepcopy.go +++ b/apis/pkg/v1beta1/zz_generated.deepcopy.go @@ -21,10 +21,43 @@ limitations under the License. package v1beta1 import ( + commonv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerConfigReference) DeepCopyInto(out *ControllerConfigReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerConfigReference. 
+func (in *ControllerConfigReference) DeepCopy() *ControllerConfigReference { + if in == nil { + return nil + } + out := new(ControllerConfigReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerReference) DeepCopyInto(out *ControllerReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerReference. +func (in *ControllerReference) DeepCopy() *ControllerReference { + if in == nil { + return nil + } + out := new(ControllerReference) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Dependency) DeepCopyInto(out *Dependency) { *out = *in @@ -456,6 +489,219 @@ func (in *ObjectMeta) DeepCopy() *ObjectMeta { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PackageRevisionRuntimeSpec) DeepCopyInto(out *PackageRevisionRuntimeSpec) { + *out = *in + in.PackageRuntimeSpec.DeepCopyInto(&out.PackageRuntimeSpec) + if in.TLSServerSecretName != nil { + in, out := &in.TLSServerSecretName, &out.TLSServerSecretName + *out = new(string) + **out = **in + } + if in.TLSClientSecretName != nil { + in, out := &in.TLSClientSecretName, &out.TLSClientSecretName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageRevisionRuntimeSpec. +func (in *PackageRevisionRuntimeSpec) DeepCopy() *PackageRevisionRuntimeSpec { + if in == nil { + return nil + } + out := new(PackageRevisionRuntimeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PackageRevisionSpec) DeepCopyInto(out *PackageRevisionSpec) { + *out = *in + if in.PackagePullSecrets != nil { + in, out := &in.PackagePullSecrets, &out.PackagePullSecrets + *out = make([]corev1.LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.PackagePullPolicy != nil { + in, out := &in.PackagePullPolicy, &out.PackagePullPolicy + *out = new(corev1.PullPolicy) + **out = **in + } + if in.IgnoreCrossplaneConstraints != nil { + in, out := &in.IgnoreCrossplaneConstraints, &out.IgnoreCrossplaneConstraints + *out = new(bool) + **out = **in + } + if in.SkipDependencyResolution != nil { + in, out := &in.SkipDependencyResolution, &out.SkipDependencyResolution + *out = new(bool) + **out = **in + } + if in.CommonLabels != nil { + in, out := &in.CommonLabels, &out.CommonLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageRevisionSpec. +func (in *PackageRevisionSpec) DeepCopy() *PackageRevisionSpec { + if in == nil { + return nil + } + out := new(PackageRevisionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PackageRevisionStatus) DeepCopyInto(out *PackageRevisionStatus) { + *out = *in + in.ConditionedStatus.DeepCopyInto(&out.ConditionedStatus) + if in.ObjectRefs != nil { + in, out := &in.ObjectRefs, &out.ObjectRefs + *out = make([]commonv1.TypedReference, len(*in)) + copy(*out, *in) + } + if in.PermissionRequests != nil { + in, out := &in.PermissionRequests, &out.PermissionRequests + *out = make([]rbacv1.PolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageRevisionStatus. 
+func (in *PackageRevisionStatus) DeepCopy() *PackageRevisionStatus { + if in == nil { + return nil + } + out := new(PackageRevisionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PackageRuntimeSpec) DeepCopyInto(out *PackageRuntimeSpec) { + *out = *in + if in.ControllerConfigReference != nil { + in, out := &in.ControllerConfigReference, &out.ControllerConfigReference + *out = new(ControllerConfigReference) + **out = **in + } + if in.RuntimeConfigReference != nil { + in, out := &in.RuntimeConfigReference, &out.RuntimeConfigReference + *out = new(RuntimeConfigReference) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageRuntimeSpec. +func (in *PackageRuntimeSpec) DeepCopy() *PackageRuntimeSpec { + if in == nil { + return nil + } + out := new(PackageRuntimeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PackageSpec) DeepCopyInto(out *PackageSpec) { + *out = *in + if in.RevisionActivationPolicy != nil { + in, out := &in.RevisionActivationPolicy, &out.RevisionActivationPolicy + *out = new(RevisionActivationPolicy) + **out = **in + } + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int64) + **out = **in + } + if in.PackagePullSecrets != nil { + in, out := &in.PackagePullSecrets, &out.PackagePullSecrets + *out = make([]corev1.LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.PackagePullPolicy != nil { + in, out := &in.PackagePullPolicy, &out.PackagePullPolicy + *out = new(corev1.PullPolicy) + **out = **in + } + if in.IgnoreCrossplaneConstraints != nil { + in, out := &in.IgnoreCrossplaneConstraints, &out.IgnoreCrossplaneConstraints + *out = new(bool) + **out = **in + } + if in.SkipDependencyResolution != nil { + in, out := &in.SkipDependencyResolution, &out.SkipDependencyResolution + *out = new(bool) + **out = **in + } + if in.CommonLabels != nil { + in, out := &in.CommonLabels, &out.CommonLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageSpec. +func (in *PackageSpec) DeepCopy() *PackageSpec { + if in == nil { + return nil + } + out := new(PackageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PackageStatus) DeepCopyInto(out *PackageStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackageStatus. 
+func (in *PackageStatus) DeepCopy() *PackageStatus { + if in == nil { + return nil + } + out := new(PackageStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeConfigReference) DeepCopyInto(out *RuntimeConfigReference) { + *out = *in + if in.APIVersion != nil { + in, out := &in.APIVersion, &out.APIVersion + *out = new(string) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeConfigReference. +func (in *RuntimeConfigReference) DeepCopy() *RuntimeConfigReference { + if in == nil { + return nil + } + out := new(RuntimeConfigReference) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ServiceAccountTemplate) DeepCopyInto(out *ServiceAccountTemplate) { *out = *in diff --git a/apis/pkg/v1beta1/zz_generated.function_types.go b/apis/pkg/v1beta1/zz_generated.function_types.go new file mode 100644 index 000000000..c4d244d7f --- /dev/null +++ b/apis/pkg/v1beta1/zz_generated.function_types.go @@ -0,0 +1,120 @@ +/* +Copyright 2023 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Generated from pkg/v1/function_types.go by ../hack/duplicate_api_type.sh. DO NOT EDIT. 
+ +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +// +kubebuilder:object:root=true +// +genclient +// +genclient:nonNamespaced + +// A Function installs an OCI compatible Crossplane package, extending +// Crossplane with support for a new kind of composition function. +// +// Read the Crossplane documentation for +// [more information about Functions](https://docs.crossplane.io/latest/concepts/composition-functions). +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="INSTALLED",type="string",JSONPath=".status.conditions[?(@.type=='Installed')].status" +// +kubebuilder:printcolumn:name="HEALTHY",type="string",JSONPath=".status.conditions[?(@.type=='Healthy')].status" +// +kubebuilder:printcolumn:name="PACKAGE",type="string",JSONPath=".spec.package" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,pkg} +type Function struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec FunctionSpec `json:"spec,omitempty"` + Status FunctionStatus `json:"status,omitempty"` +} + +// FunctionSpec specifies the configuration of a Function. +type FunctionSpec struct { + PackageSpec `json:",inline"` + + PackageRuntimeSpec `json:",inline"` +} + +// FunctionStatus represents the observed state of a Function. +type FunctionStatus struct { + xpv1.ConditionedStatus `json:",inline"` + PackageStatus `json:",inline"` +} + +// +kubebuilder:object:root=true + +// FunctionList contains a list of Function. +type FunctionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Function `json:"items"` +} + +// FunctionRevisionSpec specifies configuration for a FunctionRevision. 
+type FunctionRevisionSpec struct { + PackageRevisionSpec `json:",inline"` + PackageRevisionRuntimeSpec `json:",inline"` +} + +// +kubebuilder:object:root=true +// +genclient +// +genclient:nonNamespaced + +// A FunctionRevision represents a revision of a Function. Crossplane +// creates new revisions when there are changes to the Function. +// +// Crossplane creates and manages FunctionRevisions. Don't directly edit +// FunctionRevisions. +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="HEALTHY",type="string",JSONPath=".status.conditions[?(@.type=='Healthy')].status" +// +kubebuilder:printcolumn:name="REVISION",type="string",JSONPath=".spec.revision" +// +kubebuilder:printcolumn:name="IMAGE",type="string",JSONPath=".spec.image" +// +kubebuilder:printcolumn:name="STATE",type="string",JSONPath=".spec.desiredState" +// +kubebuilder:printcolumn:name="DEP-FOUND",type="string",JSONPath=".status.foundDependencies" +// +kubebuilder:printcolumn:name="DEP-INSTALLED",type="string",JSONPath=".status.installedDependencies" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,pkgrev} +type FunctionRevision struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec FunctionRevisionSpec `json:"spec,omitempty"` + Status FunctionRevisionStatus `json:"status,omitempty"` +} + +// FunctionRevisionStatus represents the observed state of a FunctionRevision. +type FunctionRevisionStatus struct { + PackageRevisionStatus `json:",inline"` + + // Endpoint is the gRPC endpoint where Crossplane will send + // RunFunctionRequests. + Endpoint string `json:"endpoint,omitempty"` +} + +// +kubebuilder:object:root=true + +// FunctionRevisionList contains a list of FunctionRevision. 
+type FunctionRevisionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []FunctionRevision `json:"items"` +} diff --git a/apis/pkg/v1beta1/zz_generated.package_runtime_types.go b/apis/pkg/v1beta1/zz_generated.package_runtime_types.go new file mode 100644 index 000000000..17134004c --- /dev/null +++ b/apis/pkg/v1beta1/zz_generated.package_runtime_types.go @@ -0,0 +1,73 @@ +/* +Copyright 2023 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Generated from pkg/v1/package_runtime_types.go by ../hack/duplicate_api_type.sh. DO NOT EDIT. + +package v1beta1 + +// PackageRuntimeSpec specifies configuration for the runtime of a package. +// Only used by packages that uses a runtime, i.e. by providers and functions +// but not for configurations. +type PackageRuntimeSpec struct { + // ControllerConfigRef references a ControllerConfig resource that will be + // used to configure the packaged controller Deployment. + // Deprecated: Use RuntimeConfigReference instead. + // +optional + ControllerConfigReference *ControllerConfigReference `json:"controllerConfigRef,omitempty"` + // RuntimeConfigRef references a RuntimeConfig resource that will be used + // to configure the package runtime. 
+ // +optional + // +kubebuilder:default={"name": "default"} + RuntimeConfigReference *RuntimeConfigReference `json:"runtimeConfigRef,omitempty"` +} + +// PackageRevisionRuntimeSpec specifies configuration for the runtime of a +// package revision. Only used by packages that uses a runtime, i.e. by +// providers and functions but not for configurations. +type PackageRevisionRuntimeSpec struct { + PackageRuntimeSpec `json:",inline"` + // TLSServerSecretName is the name of the TLS Secret that stores server + // certificates of the Provider. + // +optional + TLSServerSecretName *string `json:"tlsServerSecretName,omitempty"` + + // TLSClientSecretName is the name of the TLS Secret that stores client + // certificates of the Provider. + // +optional + TLSClientSecretName *string `json:"tlsClientSecretName,omitempty"` +} + +// A ControllerConfigReference to a ControllerConfig resource that will be used +// to configure the packaged controller Deployment. +type ControllerConfigReference struct { + // Name of the ControllerConfig. + Name string `json:"name"` +} + +// A RuntimeConfigReference to a runtime config resource that will be used +// to configure the package runtime. +type RuntimeConfigReference struct { + // API version of the referent. + // +optional + // +kubebuilder:default="pkg.crossplane.io/v1beta1" + APIVersion *string `json:"apiVersion,omitempty"` + // Kind of the referent. + // +optional + // +kubebuilder:default="DeploymentRuntimeConfig" + Kind *string `json:"kind,omitempty"` + // Name of the RuntimeConfig. + Name string `json:"name"` +} diff --git a/apis/pkg/v1beta1/zz_generated.package_types.go b/apis/pkg/v1beta1/zz_generated.package_types.go new file mode 100644 index 000000000..aad03a3bd --- /dev/null +++ b/apis/pkg/v1beta1/zz_generated.package_types.go @@ -0,0 +1,94 @@ +/* +Copyright 2020 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Generated from pkg/v1/package_types.go by ../hack/duplicate_api_type.sh. DO NOT EDIT. + +package v1beta1 + +import corev1 "k8s.io/api/core/v1" + +// RevisionActivationPolicy indicates how a package should activate its +// revisions. +type RevisionActivationPolicy string + +// PackageSpec specifies the desired state of a Package. +type PackageSpec struct { + // Package is the name of the package that is being requested. + Package string `json:"package"` + + // RevisionActivationPolicy specifies how the package controller should + // update from one revision to the next. Options are Automatic or Manual. + // Default is Automatic. + // +optional + // +kubebuilder:default=Automatic + RevisionActivationPolicy *RevisionActivationPolicy `json:"revisionActivationPolicy,omitempty"` + + // RevisionHistoryLimit dictates how the package controller cleans up old + // inactive package revisions. + // Defaults to 1. Can be disabled by explicitly setting to 0. + // +optional + // +kubebuilder:default=1 + RevisionHistoryLimit *int64 `json:"revisionHistoryLimit,omitempty"` + + // PackagePullSecrets are named secrets in the same namespace that can be used + // to fetch packages from private registries. + // +optional + PackagePullSecrets []corev1.LocalObjectReference `json:"packagePullSecrets,omitempty"` + + // PackagePullPolicy defines the pull policy for the package. + // Default is IfNotPresent. 
+ // +optional + // +kubebuilder:default=IfNotPresent + PackagePullPolicy *corev1.PullPolicy `json:"packagePullPolicy,omitempty"` + + // IgnoreCrossplaneConstraints indicates to the package manager whether to + // honor Crossplane version constrains specified by the package. + // Default is false. + // +optional + // +kubebuilder:default=false + IgnoreCrossplaneConstraints *bool `json:"ignoreCrossplaneConstraints,omitempty"` + + // SkipDependencyResolution indicates to the package manager whether to skip + // resolving dependencies for a package. Setting this value to true may have + // unintended consequences. + // Default is false. + // +optional + // +kubebuilder:default=false + SkipDependencyResolution *bool `json:"skipDependencyResolution,omitempty"` + + // Map of string keys and values that can be used to organize and categorize + // (scope and select) objects. May match selectors of replication controllers + // and services. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // +optional + CommonLabels map[string]string `json:"commonLabels,omitempty"` +} + +// PackageStatus represents the observed state of a Package. +type PackageStatus struct { + // CurrentRevision is the name of the current package revision. It will + // reflect the most up to date revision, whether it has been activated or + // not. + CurrentRevision string `json:"currentRevision,omitempty"` + + // CurrentIdentifier is the most recent package source that was used to + // produce a revision. The package manager uses this field to determine + // whether to check for package updates for a given source when + // packagePullPolicy is set to IfNotPresent. Manually removing this field + // will cause the package manager to check that the current revision is + // correct for the given package source. 
+ CurrentIdentifier string `json:"currentIdentifier,omitempty"` +} diff --git a/apis/pkg/v1beta1/zz_generated.revision_types.go b/apis/pkg/v1beta1/zz_generated.revision_types.go new file mode 100644 index 000000000..6fb2135e0 --- /dev/null +++ b/apis/pkg/v1beta1/zz_generated.revision_types.go @@ -0,0 +1,111 @@ +/* +Copyright 2020 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Generated from pkg/v1/revision_types.go by ../hack/duplicate_api_type.sh. DO NOT EDIT. + +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + + xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +// PackageRevisionDesiredState is the desired state of the package revision. +type PackageRevisionDesiredState string + +const ( + // PackageRevisionActive is an active package revision. + PackageRevisionActive PackageRevisionDesiredState = "Active" + + // PackageRevisionInactive is an inactive package revision. + PackageRevisionInactive PackageRevisionDesiredState = "Inactive" +) + +// PackageRevisionSpec specifies the desired state of a PackageRevision. +type PackageRevisionSpec struct { + // DesiredState of the PackageRevision. Can be either Active or Inactive. + DesiredState PackageRevisionDesiredState `json:"desiredState"` + + // Package image used by install Pod to extract package contents. 
+ Package string `json:"image"` + + // PackagePullSecrets are named secrets in the same namespace that can be + // used to fetch packages from private registries. They are also applied to + // any images pulled for the package, such as a provider's controller image. + // +optional + PackagePullSecrets []corev1.LocalObjectReference `json:"packagePullSecrets,omitempty"` + + // PackagePullPolicy defines the pull policy for the package. It is also + // applied to any images pulled for the package, such as a provider's + // controller image. + // Default is IfNotPresent. + // +optional + // +kubebuilder:default=IfNotPresent + PackagePullPolicy *corev1.PullPolicy `json:"packagePullPolicy,omitempty"` + + // Revision number. Indicates when the revision will be garbage collected + // based on the parent's RevisionHistoryLimit. + Revision int64 `json:"revision"` + + // IgnoreCrossplaneConstraints indicates to the package manager whether to + // honor Crossplane version constrains specified by the package. + // Default is false. + // +optional + // +kubebuilder:default=false + IgnoreCrossplaneConstraints *bool `json:"ignoreCrossplaneConstraints,omitempty"` + + // SkipDependencyResolution indicates to the package manager whether to skip + // resolving dependencies for a package. Setting this value to true may have + // unintended consequences. + // Default is false. + // +optional + // +kubebuilder:default=false + SkipDependencyResolution *bool `json:"skipDependencyResolution,omitempty"` + + // Map of string keys and values that can be used to organize and categorize + // (scope and select) objects. May match selectors of replication controllers + // and services. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // +optional + CommonLabels map[string]string `json:"commonLabels,omitempty"` +} + +// PackageRevisionStatus represents the observed state of a PackageRevision. 
+type PackageRevisionStatus struct { + xpv1.ConditionedStatus `json:",inline"` + + // References to objects owned by PackageRevision. + ObjectRefs []xpv1.TypedReference `json:"objectRefs,omitempty"` + + // Dependency information. + FoundDependencies int64 `json:"foundDependencies,omitempty"` + InstalledDependencies int64 `json:"installedDependencies,omitempty"` + InvalidDependencies int64 `json:"invalidDependencies,omitempty"` + + // PermissionRequests made by this package. The package declares that its + // controller needs these permissions to run. The RBAC manager is + // responsible for granting them. + PermissionRequests []rbacv1.PolicyRule `json:"permissionRequests,omitempty"` +} + +// A ControllerReference references the controller (e.g. Deployment), if any, +// that is responsible for reconciling the types a package revision installs. +type ControllerReference struct { + // Name of the controller. + Name string `json:"name"` +} diff --git a/apis/secrets/secrets.go b/apis/secrets/secrets.go index c2089d76d..66e59c595 100644 --- a/apis/secrets/secrets.go +++ b/apis/secrets/secrets.go @@ -30,10 +30,10 @@ func init() { ) } -// AddToSchemes may be used to add all resources defined in the project to a Scheme +// AddToSchemes may be used to add all resources defined in the project to a Scheme. var AddToSchemes runtime.SchemeBuilder -// AddToScheme adds all Resources to the Scheme +// AddToScheme adds all Resources to the Scheme. func AddToScheme(s *runtime.Scheme) error { return AddToSchemes.AddToScheme(s) } diff --git a/apis/secrets/v1alpha1/register.go b/apis/secrets/v1alpha1/register.go index 96259d67f..8bb63b194 100644 --- a/apis/secrets/v1alpha1/register.go +++ b/apis/secrets/v1alpha1/register.go @@ -30,13 +30,13 @@ const ( ) var ( - // SchemeGroupVersion is group version used to register these objects + // SchemeGroupVersion is group version used to register these objects. 
SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version} - // SchemeBuilder is used to add go types to the GroupVersionKind scheme + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} - // AddToScheme adds all registered types to scheme + // AddToScheme adds all registered types to scheme. AddToScheme = SchemeBuilder.AddToScheme ) diff --git a/apis/secrets/v1alpha1/storeconfig_types.go b/apis/secrets/v1alpha1/storeconfig_types.go index 12442760c..33f3b3b1d 100644 --- a/apis/secrets/v1alpha1/storeconfig_types.go +++ b/apis/secrets/v1alpha1/storeconfig_types.go @@ -29,7 +29,8 @@ type StoreConfigSpec struct { // +kubebuilder:object:root=true -// A StoreConfig configures how Crossplane controllers should store connection details. +// A StoreConfig configures how Crossplane controllers should store connection +// details in an external secret store. // +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" // +kubebuilder:printcolumn:name="TYPE",type="string",JSONPath=".spec.type" // +kubebuilder:printcolumn:name="DEFAULT-SCOPE",type="string",JSONPath=".spec.defaultScope" @@ -43,14 +44,14 @@ type StoreConfig struct { // +kubebuilder:object:root=true -// StoreConfigList contains a list of StoreConfig +// StoreConfigList contains a list of StoreConfig. type StoreConfigList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` Items []StoreConfig `json:"items"` } -// GetStoreConfig returns SecretStoreConfig +// GetStoreConfig returns SecretStoreConfig. 
func (in *StoreConfig) GetStoreConfig() xpv1.SecretStoreConfig { return in.Spec.SecretStoreConfig } diff --git a/build b/build deleted file mode 160000 index 89a10765a..000000000 --- a/build +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 89a10765abcb93a7aa8182bdd8bddc37b95cd5d2 diff --git a/cluster/charts/crossplane/README.md b/cluster/charts/crossplane/README.md index 5b162359b..87b10e946 100644 --- a/cluster/charts/crossplane/README.md +++ b/cluster/charts/crossplane/README.md @@ -71,16 +71,18 @@ and their default values. | `customAnnotations` | Add custom `annotations` to the Crossplane pod deployment. | `{}` | | `customLabels` | Add custom `labels` to the Crossplane pod deployment. | `{}` | | `deploymentStrategy` | The deployment strategy for the Crossplane and RBAC Manager pods. | `"RollingUpdate"` | +| `dnsPolicy` | Specify the `dnsPolicy` to be used by the Crossplane pod. | `""` | | `extraEnvVarsCrossplane` | Add custom environmental variables to the Crossplane pod deployment. Replaces any `.` in a variable name with `_`. For example, `SAMPLE.KEY=value1` becomes `SAMPLE_KEY=value1`. | `{}` | | `extraEnvVarsRBACManager` | Add custom environmental variables to the RBAC Manager pod deployment. Replaces any `.` in a variable name with `_`. For example, `SAMPLE.KEY=value1` becomes `SAMPLE_KEY=value1`. | `{}` | | `extraObjects` | To add arbitrary Kubernetes Objects during a Helm Install | `[]` | | `extraVolumeMountsCrossplane` | Add custom `volumeMounts` to the Crossplane pod. | `{}` | | `extraVolumesCrossplane` | Add custom `volumes` to the Crossplane pod. | `{}` | -| `hostNetwork` | Enable `hostNetwork` for the Crossplane deployment. Caution: enabling `hostNetwork` grants the Crossplane Pod access to the host network namespace. | `false` | +| `function.packages` | A list of Function packages to install | `[]` | +| `hostNetwork` | Enable `hostNetwork` for the Crossplane deployment. 
Caution: enabling `hostNetwork` grants the Crossplane Pod access to the host network namespace. Consider setting `dnsPolicy` to `ClusterFirstWithHostNet`. | `false` |
 | `image.pullPolicy` | The image pull policy used for Crossplane and RBAC Manager pods. | `"IfNotPresent"` |
 | `image.repository` | Repository for the Crossplane pod image. | `"xpkg.upbound.io/crossplane/crossplane"` |
 | `image.tag` | The Crossplane image tag. Defaults to the value of `appVersion` in `Chart.yaml`. | `""` |
-| `imagePullSecrets` | The imagePullSecret names to add to the Crossplane ServiceAccount. | `{}` |
+| `imagePullSecrets` | The imagePullSecret names to add to the Crossplane ServiceAccount. | `[]` |
 | `leaderElection` | Enable [leader election](https://docs.crossplane.io/latest/concepts/pods/#leader-election) for the Crossplane pod. | `true` |
 | `metrics.enabled` | Enable Prometheus path, port and scrape annotations and expose port 8080 for both the Crossplane and RBAC Manager pods. | `false` |
 | `nodeSelector` | Add `nodeSelectors` to the Crossplane pod deployment. | `{}` |
@@ -100,11 +102,12 @@ and their default values.
 | `rbacManager.replicas` | The number of RBAC Manager pod `replicas` to deploy. | `1` |
 | `rbacManager.skipAggregatedClusterRoles` | Don't install aggregated Crossplane ClusterRoles. | `false` |
 | `rbacManager.tolerations` | Add `tolerations` to the RBAC Manager pod deployment. | `[]` |
+| `rbacManager.topologySpreadConstraints` | Add `topologySpreadConstraints` to the RBAC Manager pod deployment. | `[]` |
 | `registryCaBundleConfig.key` | The ConfigMap key containing a custom CA bundle to enable fetching packages from registries with unknown or untrusted certificates. | `""` |
 | `registryCaBundleConfig.name` | The ConfigMap name containing a custom CA bundle to enable fetching packages from registries with unknown or untrusted certificates.
| `""` | | `replicas` | The number of Crossplane pod `replicas` to deploy. | `1` | -| `resourcesCrossplane.limits.cpu` | CPU resource limits for the Crossplane pod. | `"100m"` | -| `resourcesCrossplane.limits.memory` | Memory resource limits for the Crossplane pod. | `"512Mi"` | +| `resourcesCrossplane.limits.cpu` | CPU resource limits for the Crossplane pod. | `"500m"` | +| `resourcesCrossplane.limits.memory` | Memory resource limits for the Crossplane pod. | `"1024Mi"` | | `resourcesCrossplane.requests.cpu` | CPU resource requests for the Crossplane pod. | `"100m"` | | `resourcesCrossplane.requests.memory` | Memory resource requests for the Crossplane pod. | `"256Mi"` | | `resourcesRBACManager.limits.cpu` | CPU resource limits for the RBAC Manager pod. | `"100m"` | @@ -119,8 +122,10 @@ and their default values. | `securityContextRBACManager.readOnlyRootFilesystem` | Set the RBAC Manager pod root file system as read-only. | `true` | | `securityContextRBACManager.runAsGroup` | The group ID used by the RBAC Manager pod. | `65532` | | `securityContextRBACManager.runAsUser` | The user ID used by the RBAC Manager pod. | `65532` | +| `service.customAnnotations` | Configure annotations on the service object. Only enabled when webhooks.enabled = true | `{}` | | `serviceAccount.customAnnotations` | Add custom `annotations` to the Crossplane ServiceAccount. | `{}` | | `tolerations` | Add `tolerations` to the Crossplane pod deployment. | `[]` | +| `topologySpreadConstraints` | Add `topologySpreadConstraints` to the Crossplane pod deployment. | `[]` | | `webhooks.enabled` | Enable webhooks for Crossplane and installed Provider packages. 
| `true` | ### Command Line diff --git a/cluster/charts/crossplane/templates/deployment.yaml b/cluster/charts/crossplane/templates/deployment.yaml index 8bde3f5e4..f42c2396d 100644 --- a/cluster/charts/crossplane/templates/deployment.yaml +++ b/cluster/charts/crossplane/templates/deployment.yaml @@ -59,6 +59,10 @@ spec: - --configuration - "{{ $arg }}" {{- end }} + {{- range $arg := .Values.function.packages }} + - --function + - "{{ $arg }}" + {{- end }} imagePullPolicy: {{ .Values.image.pullPolicy }} name: {{ .Chart.Name }}-init resources: @@ -239,3 +243,9 @@ spec: {{- if .Values.affinity }} affinity: {{ toYaml .Values.affinity | nindent 8 }} {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: {{ toYaml .Values.topologySpreadConstraints | nindent 8 }} + {{- end }} + {{- with .Values.dnsPolicy }} + dnsPolicy: {{ . }} + {{- end }} diff --git a/cluster/charts/crossplane/templates/rbac-manager-deployment.yaml b/cluster/charts/crossplane/templates/rbac-manager-deployment.yaml index c94915d9c..342895ebb 100644 --- a/cluster/charts/crossplane/templates/rbac-manager-deployment.yaml +++ b/cluster/charts/crossplane/templates/rbac-manager-deployment.yaml @@ -64,11 +64,13 @@ spec: resourceFieldRef: containerName: {{ .Chart.Name }}-init resource: limits.cpu + divisor: "1" - name: GOMEMLIMIT valueFrom: resourceFieldRef: containerName: {{ .Chart.Name }}-init resource: limits.memory + divisor: "1" containers: - image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}" args: @@ -97,11 +99,13 @@ spec: resourceFieldRef: containerName: {{ .Chart.Name }} resource: limits.cpu + divisor: "1" - name: GOMEMLIMIT valueFrom: resourceFieldRef: containerName: {{ .Chart.Name }} resource: limits.memory + divisor: "1" - name: LEADER_ELECTION value: "{{ .Values.rbacManager.leaderElection }}" {{- range $key, $value := .Values.extraEnvVarsRBACManager }} @@ -114,6 +118,9 @@ spec: {{- if 
.Values.rbacManager.tolerations }} tolerations: {{ toYaml .Values.rbacManager.tolerations | nindent 6 }} {{- end }} + {{- if .Values.rbacManager.topologySpreadConstraints }} + topologySpreadConstraints: {{ toYaml .Values.rbacManager.topologySpreadConstraints | nindent 6 }} + {{- end }} {{- if .Values.rbacManager.affinity }} affinity: {{ toYaml .Values.rbacManager.affinity | nindent 8 }} {{- end }} diff --git a/cluster/charts/crossplane/templates/rbac-manager-managed-clusterroles.yaml b/cluster/charts/crossplane/templates/rbac-manager-managed-clusterroles.yaml index 2ddd200c7..c8ad21be5 100644 --- a/cluster/charts/crossplane/templates/rbac-manager-managed-clusterroles.yaml +++ b/cluster/charts/crossplane/templates/rbac-manager-managed-clusterroles.yaml @@ -103,6 +103,10 @@ rules: - pkg.crossplane.io resources: ["*"] verbs: ["*"] +- apiGroups: + - secrets.crossplane.io + resources: ["*"] + verbs: ["*"] # Crossplane administrators have access to view CRDs in order to debug XRDs. - apiGroups: [apiextensions.k8s.io] resources: [customresourcedefinitions] @@ -139,6 +143,10 @@ rules: - pkg.crossplane.io resources: ["*"] verbs: ["*"] +- apiGroups: + - secrets.crossplane.io + resources: ["*"] + verbs: ["*"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -166,6 +174,10 @@ rules: - pkg.crossplane.io resources: ["*"] verbs: [get, list, watch] +- apiGroups: + - secrets.crossplane.io + resources: ["*"] + verbs: [get, list, watch] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole diff --git a/cluster/charts/crossplane/templates/rbac-manager-serviceaccount.yaml b/cluster/charts/crossplane/templates/rbac-manager-serviceaccount.yaml index ae00f94ad..fd1dcc977 100644 --- a/cluster/charts/crossplane/templates/rbac-manager-serviceaccount.yaml +++ b/cluster/charts/crossplane/templates/rbac-manager-serviceaccount.yaml @@ -7,9 +7,9 @@ metadata: labels: app: {{ template "crossplane.name" . }} {{- include "crossplane.labels" . 
| indent 4 }} -{{- if .Values.imagePullSecrets }} +{{- with .Values.imagePullSecrets }} imagePullSecrets: -{{- range $index, $secret := .Values.imagePullSecrets }} +{{- range $index, $secret := . }} - name: {{ $secret }} {{- end }} {{- end }} diff --git a/cluster/charts/crossplane/templates/service.yaml b/cluster/charts/crossplane/templates/service.yaml index d4ca47a64..e4ba77201 100644 --- a/cluster/charts/crossplane/templates/service.yaml +++ b/cluster/charts/crossplane/templates/service.yaml @@ -8,6 +8,12 @@ metadata: app: {{ template "crossplane.name" . }} release: {{ .Release.Name }} {{- include "crossplane.labels" . | indent 4 }} + annotations: + {{- with .Values.service.customAnnotations }} + {{- range $key, $value := . }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} spec: selector: app: {{ template "crossplane.name" . }} diff --git a/cluster/charts/crossplane/templates/serviceaccount.yaml b/cluster/charts/crossplane/templates/serviceaccount.yaml index 66d948c8e..fecec467a 100644 --- a/cluster/charts/crossplane/templates/serviceaccount.yaml +++ b/cluster/charts/crossplane/templates/serviceaccount.yaml @@ -9,9 +9,9 @@ metadata: {{- with .Values.serviceAccount.customAnnotations }} annotations: {{ toYaml . | nindent 4 }} {{- end }} -{{- if .Values.imagePullSecrets }} +{{- with .Values.imagePullSecrets }} imagePullSecrets: -{{- range $index, $secret := .Values.imagePullSecrets }} +{{- range $index, $secret := . }} - name: {{ $secret }} {{- end }} {{ end }} diff --git a/cluster/charts/crossplane/values.yaml b/cluster/charts/crossplane/values.yaml index 586b47dcb..c27992287 100755 --- a/cluster/charts/crossplane/values.yaml +++ b/cluster/charts/crossplane/values.yaml @@ -21,10 +21,15 @@ nodeSelector: {} tolerations: [] # -- Add `affinities` to the Crossplane pod deployment. affinity: {} +# -- Add `topologySpreadConstraints` to the Crossplane pod deployment. +topologySpreadConstraints: [] -# -- Enable `hostNetwork` for the Crossplane deployment. 
Caution: enabling `hostNetwork` grants the Crossplane Pod access to the host network namespace. +# -- Enable `hostNetwork` for the Crossplane deployment. Caution: enabling `hostNetwork` grants the Crossplane Pod access to the host network namespace. Consider setting `dnsPolicy` to `ClusterFirstWithHostNet`. hostNetwork: false +# -- Specify the `dnsPolicy` to be used by the Crossplane pod. +dnsPolicy: "" + # -- Add custom `labels` to the Crossplane pod deployment. customLabels: {} @@ -48,8 +53,12 @@ configuration: # -- A list of Configuration packages to install. packages: [] +function: + # -- A list of Function packages to install + packages: [] + # -- The imagePullSecret names to add to the Crossplane ServiceAccount. -imagePullSecrets: {} +imagePullSecrets: [] registryCaBundleConfig: # -- The ConfigMap name containing a custom CA bundle to enable fetching packages from registries with unknown or untrusted certificates. @@ -57,6 +66,10 @@ registryCaBundleConfig: # -- The ConfigMap key containing a custom CA bundle to enable fetching packages from registries with unknown or untrusted certificates. key: "" +service: + # -- Configure annotations on the service object. Only enabled when webhooks.enabled = true + customAnnotations: {} + webhooks: # -- Enable webhooks for Crossplane and installed Provider packages. enabled: true @@ -78,6 +91,8 @@ rbacManager: tolerations: [] # -- Add `affinities` to the RBAC Manager pod deployment. affinity: {} + # -- Add `topologySpreadConstraints` to the RBAC Manager pod deployment. + topologySpreadConstraints: [] # -- The PriorityClass name to apply to the Crossplane and RBAC Manager pods. priorityClassName: "" @@ -85,9 +100,9 @@ priorityClassName: "" resourcesCrossplane: limits: # -- CPU resource limits for the Crossplane pod. - cpu: 100m + cpu: 500m # -- Memory resource limits for the Crossplane pod. - memory: 512Mi + memory: 1024Mi requests: # -- CPU resource requests for the Crossplane pod. 
cpu: 100m diff --git a/cluster/crd-patches/pkg.crossplane.io_deploymentruntimeconfigs.yaml b/cluster/crd-patches/pkg.crossplane.io_deploymentruntimeconfigs.yaml new file mode 100644 index 000000000..57e3dd1e1 --- /dev/null +++ b/cluster/crd-patches/pkg.crossplane.io_deploymentruntimeconfigs.yaml @@ -0,0 +1,15 @@ +# For reasons that aren't immediately obvious, updating k8s.io/code-generator +# from v0.29.x to v0.30 triggers a variant of the below issue. As far as I can +# tell, this is the only way to work around it. The below fields are list map +# keys, but aren't required in the generated CRD. +# https://github.com/kubernetes-sigs/controller-tools/issues/444 + +- op: add + path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/deploymentTemplate/properties/spec/properties/template/properties/spec/properties/hostAliases/items/required + value: + - ip + +- op: add + path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/deploymentTemplate/properties/spec/properties/template/properties/spec/properties/imagePullSecrets/items/required + value: + - name \ No newline at end of file diff --git a/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml b/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml index 602690af6..b71780bd2 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -32,9 +33,12 @@ spec: schema: openAPIV3Schema: description: |- - A CompositeResourceDefinition defines a new kind of composite infrastructure - resource. The new resource is composed of other composite or managed - infrastructure resources. + A CompositeResourceDefinition defines the schema for a new custom Kubernetes + API. 
+ + + Read the Crossplane documentation for + [more information about CustomResourceDefinitions](https://docs.crossplane.io/latest/concepts/composite-resource-definitions). properties: apiVersion: description: |- @@ -76,6 +80,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic kind: description: |- kind is the serialized kind of the resource. It is normally CamelCase and singular. @@ -100,6 +105,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic singular: description: singular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`. @@ -108,6 +114,9 @@ spec: - kind - plural type: object + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf connectionSecretKeys: description: |- ConnectionSecretKeys is the list of keys that will be exposed to the end @@ -219,6 +228,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - conversionReviewVersions type: object @@ -265,12 +275,18 @@ spec: required: - name type: object + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf group: description: |- Group specifies the API group of the defined composite resource. Composite resources are served under `/apis//...`. Must match the name of the XRD (in the form `.`). type: string + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf metadata: description: Metadata specifies the desired metadata for the defined composite resource and claim CRD's. @@ -309,6 +325,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic kind: description: |- kind is the serialized kind of the resource. It is normally CamelCase and singular. @@ -333,6 +350,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic singular: description: singular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`. 
@@ -341,6 +359,9 @@ spec: - kind - plural type: object + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf versions: description: |- Versions is the list of all API versions of the defined composite @@ -480,6 +501,13 @@ spec: A Message containing details about this condition's last transition from one status to another, if any. type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer reason: description: A Reason for this condition's last transition from one status to another. diff --git a/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml b/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml index 35df695b2..682c12578 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositionrevisions.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -34,8 +35,12 @@ spec: schema: openAPIV3Schema: description: |- - A CompositionRevision represents a revision in time of a Composition. - Revisions are created by Crossplane; they should be treated as immutable. + A CompositionRevision represents a revision of a Composition. Crossplane + creates new revisions when there are changes to the Composition. + + + Crossplane creates and manages CompositionRevisions. Don't directly edit + CompositionRevisions. properties: apiVersion: description: |- @@ -74,6 +79,9 @@ spec: - apiVersion - kind type: object + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf environment: description: |- Environment configures the environment in which resources are rendered. 
@@ -283,7 +291,7 @@ spec: type: string mergeOptions: description: MergeOptions Specifies merge options on - a field path + a field path. properties: appendSlice: description: Specifies that already existing elements @@ -458,6 +466,7 @@ spec: `ToJson` converts any input value into its raw JSON representation. `ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input converted to JSON. + `ToAdler32` generate a addler32 hash based on the input string. enum: - ToUpper - ToLower @@ -467,6 +476,7 @@ spec: - ToSha1 - ToSha256 - ToSha512 + - ToAdler32 type: string fmt: description: |- @@ -578,17 +588,20 @@ spec: Mode controls what type or "mode" of Composition will be used. - "Resources" (the default) indicates that a Composition uses what is - commonly referred to as "Patch & Transform" or P&T composition. This mode - of Composition uses an array of resources, each a template for a composed - resource. + "Pipeline" indicates that a Composition specifies a pipeline of + Composition Functions, each of which is responsible for producing + composed resources that Crossplane should create or update. + + "Resources" indicates that a Composition uses what is commonly referred + to as "Patch & Transform" or P&T composition. This mode of Composition + uses an array of resources, each a template for a composed resource. - "Pipeline" indicates that a Composition specifies a pipeline - of Composition Functions, each of which is responsible for producing - composed resources that Crossplane should create or update. THE PIPELINE - MODE IS A BETA FEATURE. It is not honored if the relevant Crossplane - feature flag is disabled. + + All Compositions should use Pipeline mode. Resources mode is deprecated. + Resources mode won't be removed in Crossplane 1.x, and will remain the + default to avoid breaking legacy Compositions. However, it's no longer + accepting new features, and only accepting security related bug fixes. 
enum: - Resources - Pipeline @@ -602,6 +615,9 @@ spec: PatchSets are only used by the "Resources" mode of Composition. They are ignored by other modes. + + + Deprecated: Use Composition Functions instead. items: description: |- A PatchSet is a set of patches that can be reused from all resources within @@ -695,7 +711,7 @@ spec: type: string mergeOptions: description: MergeOptions Specifies merge options - on a field path + on a field path. properties: appendSlice: description: Specifies that already existing elements @@ -871,6 +887,7 @@ spec: `ToJson` converts any input value into its raw JSON representation. `ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input converted to JSON. + `ToAdler32` generate a addler32 hash based on the input string. enum: - ToUpper - ToLower @@ -880,6 +897,7 @@ spec: - ToSha1 - ToSha256 - ToSha512 + - ToAdler32 type: string fmt: description: |- @@ -976,13 +994,49 @@ spec: The Pipeline is only used by the "Pipeline" mode of Composition. It is ignored by other modes. - - - THIS IS A BETA FIELD. It is not honored if the relevant Crossplane - feature flag is disabled. items: description: A PipelineStep in a Composition Function pipeline. properties: + credentials: + description: Credentials are optional credentials that the Composition + Function needs. + items: + description: |- + FunctionCredentials are optional credentials that a Composition Function + needs to run. + properties: + name: + description: Name of this set of credentials. + type: string + secretRef: + description: |- + A SecretRef is a reference to a secret containing credentials that should + be supplied to the function. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + source: + description: Source of the function credentials. 
+ enum: + - None + - Secret + type: string + required: + - name + - source + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map functionRef: description: |- FunctionRef is a reference to the Composition Function this step should @@ -1010,6 +1064,9 @@ spec: - step type: object type: array + x-kubernetes-list-map-keys: + - step + x-kubernetes-list-type: map publishConnectionDetailsWithStoreConfigRef: default: name: default @@ -1037,6 +1094,9 @@ spec: Resources are only used by the "Resources" mode of Composition. They are ignored by other modes. + + + Deprecated: Use Composition Functions instead. items: description: |- ComposedTemplate is used to provide information about how the composed resource @@ -1191,7 +1251,7 @@ spec: type: string mergeOptions: description: MergeOptions Specifies merge options - on a field path + on a field path. properties: appendSlice: description: Specifies that already existing elements @@ -1367,6 +1427,7 @@ spec: `ToJson` converts any input value into its raw JSON representation. `ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input converted to JSON. + `ToAdler32` generate a addler32 hash based on the input string. enum: - ToUpper - ToLower @@ -1376,6 +1437,7 @@ spec: - ToSha1 - ToSha256 - ToSha512 + - ToAdler32 type: string fmt: description: |- @@ -1471,7 +1533,7 @@ spec: items: description: |- ReadinessCheck is used to indicate how to tell whether a resource is ready - for consumption + for consumption. properties: fieldPath: description: FieldPath shows the path of the field whose @@ -1525,7 +1587,14 @@ spec: type: object type: array revision: - description: Revision number. Newer revisions have larger numbers. + description: |- + Revision number. Newer revisions have larger numbers. + + + This number can change. When a Composition transitions from state A + -> B -> A there will be only two CompositionRevisions. 
Crossplane will + edit the original CompositionRevision to change its revision number from + 0 to 2. format: int64 type: integer writeConnectionSecretsToNamespace: @@ -1564,6 +1633,13 @@ spec: A Message containing details about this condition's last transition from one status to another, if any. type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer reason: description: A Reason for this condition's last transition from one status to another. @@ -1610,8 +1686,12 @@ spec: schema: openAPIV3Schema: description: |- - A CompositionRevision represents a revision in time of a Composition. - Revisions are created by Crossplane; they should be treated as immutable. + A CompositionRevision represents a revision of a Composition. Crossplane + creates new revisions when there are changes to the Composition. + + + Crossplane creates and manages CompositionRevisions. Don't directly edit + CompositionRevisions. properties: apiVersion: description: |- @@ -1650,6 +1730,9 @@ spec: - apiVersion - kind type: object + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf environment: description: |- Environment configures the environment in which resources are rendered. @@ -1859,7 +1942,7 @@ spec: type: string mergeOptions: description: MergeOptions Specifies merge options on - a field path + a field path. properties: appendSlice: description: Specifies that already existing elements @@ -2034,6 +2117,7 @@ spec: `ToJson` converts any input value into its raw JSON representation. `ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input converted to JSON. + `ToAdler32` generate a addler32 hash based on the input string. 
enum: - ToUpper - ToLower @@ -2043,6 +2127,7 @@ spec: - ToSha1 - ToSha256 - ToSha512 + - ToAdler32 type: string fmt: description: |- @@ -2154,17 +2239,20 @@ spec: Mode controls what type or "mode" of Composition will be used. - "Resources" (the default) indicates that a Composition uses what is - commonly referred to as "Patch & Transform" or P&T composition. This mode - of Composition uses an array of resources, each a template for a composed - resource. + "Pipeline" indicates that a Composition specifies a pipeline of + Composition Functions, each of which is responsible for producing + composed resources that Crossplane should create or update. + + "Resources" indicates that a Composition uses what is commonly referred + to as "Patch & Transform" or P&T composition. This mode of Composition + uses an array of resources, each a template for a composed resource. - "Pipeline" indicates that a Composition specifies a pipeline - of Composition Functions, each of which is responsible for producing - composed resources that Crossplane should create or update. THE PIPELINE - MODE IS A BETA FEATURE. It is not honored if the relevant Crossplane - feature flag is disabled. + + All Compositions should use Pipeline mode. Resources mode is deprecated. + Resources mode won't be removed in Crossplane 1.x, and will remain the + default to avoid breaking legacy Compositions. However, it's no longer + accepting new features, and only accepting security related bug fixes. enum: - Resources - Pipeline @@ -2178,6 +2266,9 @@ spec: PatchSets are only used by the "Resources" mode of Composition. They are ignored by other modes. + + + Deprecated: Use Composition Functions instead. items: description: |- A PatchSet is a set of patches that can be reused from all resources within @@ -2271,7 +2362,7 @@ spec: type: string mergeOptions: description: MergeOptions Specifies merge options - on a field path + on a field path. 
properties: appendSlice: description: Specifies that already existing elements @@ -2447,6 +2538,7 @@ spec: `ToJson` converts any input value into its raw JSON representation. `ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input converted to JSON. + `ToAdler32` generate a addler32 hash based on the input string. enum: - ToUpper - ToLower @@ -2456,6 +2548,7 @@ spec: - ToSha1 - ToSha256 - ToSha512 + - ToAdler32 type: string fmt: description: |- @@ -2552,13 +2645,49 @@ spec: The Pipeline is only used by the "Pipeline" mode of Composition. It is ignored by other modes. - - - THIS IS A BETA FIELD. It is not honored if the relevant Crossplane - feature flag is disabled. items: description: A PipelineStep in a Composition Function pipeline. properties: + credentials: + description: Credentials are optional credentials that the Composition + Function needs. + items: + description: |- + FunctionCredentials are optional credentials that a Composition Function + needs to run. + properties: + name: + description: Name of this set of credentials. + type: string + secretRef: + description: |- + A SecretRef is a reference to a secret containing credentials that should + be supplied to the function. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + source: + description: Source of the function credentials. 
+ enum: + - None + - Secret + type: string + required: + - name + - source + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map functionRef: description: |- FunctionRef is a reference to the Composition Function this step should @@ -2586,6 +2715,9 @@ spec: - step type: object type: array + x-kubernetes-list-map-keys: + - step + x-kubernetes-list-type: map publishConnectionDetailsWithStoreConfigRef: default: name: default @@ -2613,6 +2745,9 @@ spec: Resources are only used by the "Resources" mode of Composition. They are ignored by other modes. + + + Deprecated: Use Composition Functions instead. items: description: |- ComposedTemplate is used to provide information about how the composed resource @@ -2767,7 +2902,7 @@ spec: type: string mergeOptions: description: MergeOptions Specifies merge options - on a field path + on a field path. properties: appendSlice: description: Specifies that already existing elements @@ -2943,6 +3078,7 @@ spec: `ToJson` converts any input value into its raw JSON representation. `ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input converted to JSON. + `ToAdler32` generate a addler32 hash based on the input string. enum: - ToUpper - ToLower @@ -2952,6 +3088,7 @@ spec: - ToSha1 - ToSha256 - ToSha512 + - ToAdler32 type: string fmt: description: |- @@ -3047,7 +3184,7 @@ spec: items: description: |- ReadinessCheck is used to indicate how to tell whether a resource is ready - for consumption + for consumption. properties: fieldPath: description: FieldPath shows the path of the field whose @@ -3101,7 +3238,14 @@ spec: type: object type: array revision: - description: Revision number. Newer revisions have larger numbers. + description: |- + Revision number. Newer revisions have larger numbers. + + + This number can change. When a Composition transitions from state A + -> B -> A there will be only two CompositionRevisions. 
Crossplane will + edit the original CompositionRevision to change its revision number from + 0 to 2. format: int64 type: integer writeConnectionSecretsToNamespace: @@ -3140,6 +3284,13 @@ spec: A Message containing details about this condition's last transition from one status to another, if any. type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer reason: description: A Reason for this condition's last transition from one status to another. diff --git a/cluster/crds/apiextensions.crossplane.io_compositions.yaml b/cluster/crds/apiextensions.crossplane.io_compositions.yaml index 0b6bb2872..d8fa40672 100644 --- a/cluster/crds/apiextensions.crossplane.io_compositions.yaml +++ b/cluster/crds/apiextensions.crossplane.io_compositions.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -30,7 +31,13 @@ spec: name: v1 schema: openAPIV3Schema: - description: A Composition specifies how a composite resource should be composed. + description: |- + A Composition defines a collection of managed resources or functions that + Crossplane uses to create and manage new composite resources. + + + Read the Crossplane documentation for + [more information about Compositions](https://docs.crossplane.io/latest/concepts/compositions). properties: apiVersion: description: |- @@ -67,6 +74,9 @@ spec: - apiVersion - kind type: object + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf environment: description: |- Environment configures the environment in which resources are rendered. 
@@ -276,7 +286,7 @@ spec: type: string mergeOptions: description: MergeOptions Specifies merge options on - a field path + a field path. properties: appendSlice: description: Specifies that already existing elements @@ -451,6 +461,7 @@ spec: `ToJson` converts any input value into its raw JSON representation. `ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input converted to JSON. + `ToAdler32` generates an Adler-32 hash based on the input string. enum: - ToUpper - ToLower @@ -460,6 +471,7 @@ spec: - ToSha1 - ToSha256 - ToSha512 + - ToAdler32 type: string fmt: description: |- @@ -571,17 +583,20 @@ spec: Mode controls what type or "mode" of Composition will be used. - "Resources" (the default) indicates that a Composition uses what is - commonly referred to as "Patch & Transform" or P&T composition. This mode - of Composition uses an array of resources, each a template for a composed - resource. + "Pipeline" indicates that a Composition specifies a pipeline of + Composition Functions, each of which is responsible for producing + composed resources that Crossplane should create or update. + + + "Resources" indicates that a Composition uses what is commonly referred + to as "Patch & Transform" or P&T composition. This mode of Composition + uses an array of resources, each a template for a composed resource. - "Pipeline" indicates that a Composition specifies a pipeline - of Composition Functions, each of which is responsible for producing - composed resources that Crossplane should create or update. THE PIPELINE - MODE IS A BETA FEATURE. It is not honored if the relevant Crossplane - feature flag is disabled. + All Compositions should use Pipeline mode. Resources mode is deprecated. + Resources mode won't be removed in Crossplane 1.x, and will remain the + default to avoid breaking legacy Compositions. However, it's no longer + accepting new features, and only accepting security related bug fixes. 
enum: - Resources - Pipeline @@ -595,6 +610,9 @@ spec: PatchSets are only used by the "Resources" mode of Composition. They are ignored by other modes. + + + Deprecated: Use Composition Functions instead. items: description: |- A PatchSet is a set of patches that can be reused from all resources within @@ -688,7 +706,7 @@ spec: type: string mergeOptions: description: MergeOptions Specifies merge options - on a field path + on a field path. properties: appendSlice: description: Specifies that already existing elements @@ -864,6 +882,7 @@ spec: `ToJson` converts any input value into its raw JSON representation. `ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input converted to JSON. + `ToAdler32` generates an Adler-32 hash based on the input string. enum: - ToUpper - ToLower @@ -873,6 +892,7 @@ spec: - ToSha1 - ToSha256 - ToSha512 + - ToAdler32 type: string fmt: description: |- @@ -969,13 +989,49 @@ spec: The Pipeline is only used by the "Pipeline" mode of Composition. It is ignored by other modes. - - - THIS IS A BETA FIELD. It is not honored if the relevant Crossplane - feature flag is disabled. items: description: A PipelineStep in a Composition Function pipeline. properties: + credentials: + description: Credentials are optional credentials that the Composition + Function needs. + items: + description: |- + FunctionCredentials are optional credentials that a Composition Function + needs to run. + properties: + name: + description: Name of this set of credentials. + type: string + secretRef: + description: |- + A SecretRef is a reference to a secret containing credentials that should + be supplied to the function. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + source: + description: Source of the function credentials. 
+ enum: + - None + - Secret + type: string + required: + - name + - source + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map functionRef: description: |- FunctionRef is a reference to the Composition Function this step should @@ -1003,6 +1059,9 @@ spec: - step type: object type: array + x-kubernetes-list-map-keys: + - step + x-kubernetes-list-type: map publishConnectionDetailsWithStoreConfigRef: default: name: default @@ -1030,6 +1089,9 @@ spec: Resources are only used by the "Resources" mode of Composition. They are ignored by other modes. + + + Deprecated: Use Composition Functions instead. items: description: |- ComposedTemplate is used to provide information about how the composed resource @@ -1184,7 +1246,7 @@ spec: type: string mergeOptions: description: MergeOptions Specifies merge options - on a field path + on a field path. properties: appendSlice: description: Specifies that already existing elements @@ -1360,6 +1422,7 @@ spec: `ToJson` converts any input value into its raw JSON representation. `ToSha1`, `ToSha256` and `ToSha512` generate a hash value based on the input converted to JSON. + `ToAdler32` generates an Adler-32 hash based on the input string. enum: - ToUpper - ToLower @@ -1369,6 +1432,7 @@ spec: - ToSha1 - ToSha256 - ToSha512 + - ToAdler32 type: string fmt: description: |- @@ -1464,7 +1528,7 @@ spec: items: description: |- ReadinessCheck is used to indicate how to tell whether a resource is ready - for consumption + for consumption. 
properties: fieldPath: description: FieldPath shows the path of the field whose diff --git a/cluster/crds/apiextensions.crossplane.io_environmentconfigs.yaml b/cluster/crds/apiextensions.crossplane.io_environmentconfigs.yaml index 9a703e214..03db70ad1 100644 --- a/cluster/crds/apiextensions.crossplane.io_environmentconfigs.yaml +++ b/cluster/crds/apiextensions.crossplane.io_environmentconfigs.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -24,8 +25,13 @@ spec: name: v1alpha1 schema: openAPIV3Schema: - description: A EnvironmentConfig contains a set of arbitrary, unstructured - values. + description: |- + An EnvironmentConfig contains user-defined unstructured values for + use in a Composition. + + + Read the Crossplane documentation for + [more information about EnvironmentConfigs](https://docs.crossplane.io/latest/concepts/environment-configs). properties: apiVersion: description: |- diff --git a/cluster/crds/apiextensions.crossplane.io_usages.yaml b/cluster/crds/apiextensions.crossplane.io_usages.yaml index 0cd30d52f..34b658999 100644 --- a/cluster/crds/apiextensions.crossplane.io_usages.yaml +++ b/cluster/crds/apiextensions.crossplane.io_usages.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -28,8 +29,16 @@ spec: name: v1alpha1 schema: openAPIV3Schema: - description: A Usage defines a deletion blocking relationship between two - resources. + description: |- + A Usage defines a deletion blocking relationship between two resources. + + + Usages prevent accidental deletion of a single resource or deletion of + resources with dependent resources. + + + Read the Crossplane documentation for + [more information about Usages](https://docs.crossplane.io/latest/concepts/usages). properties: apiVersion: description: |- @@ -138,6 +147,11 @@ spec: reason: description: Reason is the reason for blocking deletion of the resource. 
type: string + replayDeletion: + description: ReplayDeletion will trigger a deletion on the used resource + during the deletion of the usage itself, if it was attempted to + be deleted at least once. + type: boolean required: - of type: object @@ -163,6 +177,13 @@ spec: A Message containing details about this condition's last transition from one status to another, if any. type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer reason: description: A Reason for this condition's last transition from one status to another. diff --git a/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml b/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml index 646282639..bc8c4683f 100644 --- a/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml +++ b/cluster/crds/pkg.crossplane.io_configurationrevisions.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -41,7 +42,13 @@ spec: name: v1 schema: openAPIV3Schema: - description: A ConfigurationRevision that has been added to Crossplane. + description: |- + A ConfigurationRevision represents a revision of a Configuration. Crossplane + creates new revisions when there are changes to a Configuration. + + + Crossplane creates and manages ConfigurationRevision. Don't directly edit + ConfigurationRevisions. properties: apiVersion: description: |- @@ -70,7 +77,7 @@ spec: Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
- More info: http://kubernetes.io/docs/user-guide/labels + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ type: object desiredState: description: DesiredState of the PackageRevision. Can be either Active @@ -106,10 +113,15 @@ spec: referenced object inside the same namespace. properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -153,6 +165,13 @@ spec: A Message containing details about this condition's last transition from one status to another, if any. type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer reason: description: A Reason for this condition's last transition from one status to another. @@ -229,6 +248,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic nonResourceURLs: description: |- NonResourceURLs is a set of partial urls that a user should have access to. 
*s are allowed, but only as the full, final step in the path @@ -237,6 +257,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic resourceNames: description: ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything @@ -244,18 +265,21 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic resources: description: Resources is a list of resources this rule applies to. '*' represents all resources. items: type: string type: array + x-kubernetes-list-type: atomic verbs: description: Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs. items: type: string type: array + x-kubernetes-list-type: atomic required: - verbs type: object diff --git a/cluster/crds/pkg.crossplane.io_configurations.yaml b/cluster/crds/pkg.crossplane.io_configurations.yaml index b9a43d5cc..325aacd96 100644 --- a/cluster/crds/pkg.crossplane.io_configurations.yaml +++ b/cluster/crds/pkg.crossplane.io_configurations.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -32,8 +33,14 @@ spec: name: v1 schema: openAPIV3Schema: - description: Configuration is the CRD type for a request to add a configuration - to Crossplane. + description: |- + A Configuration installs an OCI compatible Crossplane package, extending + Crossplane with support for new kinds of CompositeResourceDefinitions and + Compositions. + + + Read the Crossplane documentation for + [more information about Configuration packages](https://docs.crossplane.io/latest/concepts/packages). properties: apiVersion: description: |- @@ -64,7 +71,7 @@ spec: Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
- More info: http://kubernetes.io/docs/user-guide/labels + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ type: object ignoreCrossplaneConstraints: default: false @@ -92,10 +99,15 @@ spec: referenced object inside the same namespace. properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -145,6 +157,13 @@ spec: A Message containing details about this condition's last transition from one status to another, if any. type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer reason: description: A Reason for this condition's last transition from one status to another. 
diff --git a/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml b/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml index 5e55813c4..e5313498e 100644 --- a/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml +++ b/cluster/crds/pkg.crossplane.io_controllerconfigs.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -24,10 +25,15 @@ spec: schema: openAPIV3Schema: description: |- - ControllerConfig is the CRD type for a packaged controller configuration. - Deprecated: This API is replaced by DeploymentRuntimeConfig, and is scheduled - to be removed in a future release. See the design doc for more details: - https://github.com/crossplane/crossplane/blob/11bbe13ea3604928cc4e24e8d0d18f3f5f7e847c/design/one-pager-package-runtime-config.md + A ControllerConfig applies settings to controllers like Provider pods. + Deprecated: Use the + [DeploymentRuntimeConfig](https://docs.crossplane.io/latest/concepts/providers#runtime-configuration) + instead. + + + Read the + [Package Runtime Configuration](https://github.com/crossplane/crossplane/blob/11bbe13ea3604928cc4e24e8d0d18f3f5f7e847c/design/one-pager-package-runtime-config.md) + design document for more details. properties: apiVersion: description: |- @@ -106,11 +112,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. 
@@ -138,11 +146,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object x-kubernetes-map-type: atomic weight: @@ -155,6 +165,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: description: |- If the affinity requirements specified by this field are not met at @@ -199,11 +210,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. @@ -231,14 +244,17 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object @@ -299,11 +315,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -318,12 +336,12 @@ spec: description: |- MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - Also, MatchLabelKeys cannot be set when LabelSelector isn't set. 
+ The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string @@ -333,12 +351,12 @@ spec: description: |- MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. - Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
items: type: string @@ -379,11 +397,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -403,6 +423,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: description: |- This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching @@ -425,6 +446,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: description: |- If the affinity requirements specified by this field are not met at @@ -474,11 +496,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -493,12 +517,12 @@ spec: description: |- MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
items: type: string @@ -508,12 +532,12 @@ spec: description: |- MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. - Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string @@ -553,11 +577,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -577,6 +603,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: description: |- This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching @@ -589,6 +616,7 @@ spec: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: description: Describes pod anti-affinity scheduling rules (e.g. 
@@ -646,11 +674,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -665,12 +695,12 @@ spec: description: |- MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string @@ -680,12 +710,12 @@ spec: description: |- MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. - Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. 
+ The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string @@ -726,11 +756,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -750,6 +782,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: description: |- This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching @@ -772,6 +805,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: description: |- If the anti-affinity requirements specified by this field are not met at @@ -821,11 +855,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -840,12 +876,12 @@ spec: description: |- MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. 
+ Also, matchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string @@ -855,12 +891,12 @@ spec: description: |- MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. - Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string @@ -900,11 +936,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -924,6 +962,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: description: |- This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching @@ -936,6 +975,7 @@ spec: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object args: @@ -985,10 +1025,15 @@ spec: description: The key to select. type: string name: + default: "" description: |- Name of the referent. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the ConfigMap or its key @@ -1047,10 +1092,15 @@ spec: be a valid secret key. type: string name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the Secret or its key must @@ -1080,10 +1130,15 @@ spec: description: The ConfigMap to select from properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the ConfigMap must be defined @@ -1098,10 +1153,15 @@ spec: description: The Secret to select from properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the Secret must be defined @@ -1139,10 +1199,15 @@ spec: referenced object inside the same namespace. properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -1157,7 +1222,7 @@ spec: Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. 
They are not queryable and should be preserved when modifying objects. - More info: http://kubernetes.io/docs/user-guide/annotations + More info: http:https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ type: object labels: additionalProperties: @@ -1168,7 +1233,7 @@ spec: labels on the pod, not the pod selector. Labels will be merged with internal labels used by crossplane, and labels with a crossplane.io key might be overwritten. - More info: http://kubernetes.io/docs/user-guide/labels + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ type: object type: object nodeName: @@ -1190,6 +1255,29 @@ spec: PodSecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field. properties: + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object fsGroup: description: |- A special supplemental group that applies to all containers in a pod. @@ -1309,6 +1397,7 @@ spec: format: int64 type: integer type: array + x-kubernetes-list-type: atomic sysctls: description: |- Sysctls hold a list of namespaced sysctls used for the pod. 
Pods with unsupported @@ -1328,6 +1417,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: description: |- The Windows specific settings applied to all containers. @@ -1482,7 +1572,7 @@ spec: to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. - More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md + More info: https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/585-runtime-class/README.md This is a beta feature as of Kubernetes v1.14. type: string securityContext: @@ -1500,6 +1590,30 @@ spec: 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows. type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object capabilities: description: |- The capabilities to add/drop when running containers. 
@@ -1512,12 +1626,14 @@ spec: description: Capability represent POSIX capabilities type type: string type: array + x-kubernetes-list-type: atomic drop: description: Removed capabilities items: description: Capability represent POSIX capabilities type type: string type: array + x-kubernetes-list-type: atomic type: object privileged: description: |- @@ -1725,6 +1841,8 @@ spec: to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). type: string name: description: This must match the Name of a Volume. @@ -1734,6 +1852,29 @@ spec: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + + If ReadOnly is false, this field has no meaning and must be unspecified. + + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string subPath: description: |- Path within the volume from which the container's volume should be mounted. 
@@ -1862,6 +2003,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic path: description: 'path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /' @@ -1883,10 +2025,15 @@ spec: More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -1922,10 +2069,15 @@ spec: to OpenStack. properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -1989,11 +2141,17 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" description: |- Name of the referent. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: optional specify whether the ConfigMap or its @@ -2026,10 +2184,15 @@ spec: secret object contains more than one secret, all secret references are passed. properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -2072,8 +2235,8 @@ spec: properties: fieldRef: description: 'Required: Selects a field of the pod: - only annotations, labels, name and namespace are - supported.' + only annotations, labels, name, namespace and uid + are supported.' 
properties: apiVersion: description: Version of the schema the FieldPath @@ -2132,6 +2295,7 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic type: object emptyDir: description: |- @@ -2254,6 +2418,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic dataSource: description: |- dataSource field can be used to specify either: @@ -2398,11 +2563,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -2430,7 +2597,7 @@ spec: If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. type: string volumeMode: @@ -2474,6 +2641,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic wwids: description: |- wwids Optional: FC volume world wide identifiers (wwids) @@ -2481,6 +2649,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object flexVolume: description: |- @@ -2517,10 +2686,15 @@ spec: scripts. properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -2701,6 +2875,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic readOnly: description: |- readOnly here will force the ReadOnly setting in VolumeMounts. @@ -2711,10 +2886,15 @@ spec: and initiator authentication properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -2889,11 +3069,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -2972,11 +3154,17 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: optional specify whether the ConfigMap @@ -2999,7 +3187,7 @@ spec: fieldRef: description: 'Required: Selects a field of the pod: only annotations, labels, - name and namespace are supported.' + name, namespace and uid are supported.' properties: apiVersion: description: Version of the schema the @@ -3062,6 +3250,7 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic type: object secret: description: secret information about the secret data @@ -3105,11 +3294,17 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: optional field specify whether the @@ -3148,6 +3343,7 @@ spec: type: object type: object type: array + x-kubernetes-list-type: atomic type: object quobyte: description: quobyte represents a Quobyte mount on the host @@ -3218,6 +3414,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic pool: description: |- pool is the rados pool name. @@ -3238,10 +3435,15 @@ spec: More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it properties: name: + default: "" description: |- Name of the referent. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -3285,10 +3487,15 @@ spec: sensitive information. If this is not provided, Login operation will fail. properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -3372,6 +3579,7 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic optional: description: optional field specify whether the Secret or its keys must be defined @@ -3403,10 +3611,15 @@ spec: credentials. If not specified, default values will be attempted. properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic diff --git a/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml b/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml index d955677f9..77bcab8a3 100644 --- a/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml +++ b/cluster/crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml @@ -23,10 +23,12 @@ spec: schema: openAPIV3Schema: description: |- - A DeploymentRuntimeConfig is used to configure the package runtime when - the package uses a runtime and the package manager is running with - --package-runtime=Deployment (the default). See the following design doc for - more details:https://github.com/crossplane/crossplane/blob/91edeae3fcac96c6c8a1759a723981eea4bb77e4/design/one-pager-package-runtime-config.md#migration-from-controllerconfig + The DeploymentRuntimeConfig provides settings for the Kubernetes Deployment + of a Provider or composition function package. + + + Read the Crossplane documentation for + [more information about DeploymentRuntimeConfigs](https://docs.crossplane.io/latest/concepts/providers/#runtime-configuration). properties: apiVersion: description: |- @@ -66,7 +68,7 @@ spec: Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. - More info: http://kubernetes.io/docs/user-guide/annotations + More info: http:https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ type: object labels: additionalProperties: @@ -76,7 +78,7 @@ spec: (scope and select) objects. 
Labels will be merged with internal labels used by crossplane, and labels with a crossplane.io key might be overwritten. - More info: http://kubernetes.io/docs/user-guide/labels + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ type: object name: description: Name is the name of the object. @@ -150,11 +152,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -310,11 +314,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. @@ -343,11 +349,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object x-kubernetes-map-type: atomic weight: @@ -361,6 +369,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: description: |- If the affinity requirements specified by this field are not met at @@ -406,11 +415,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. 
@@ -439,14 +450,17 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object @@ -512,11 +526,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -531,12 +547,12 @@ spec: description: |- MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string @@ -546,12 +562,12 @@ spec: description: |- MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. 
The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. - Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string @@ -594,11 +610,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -618,6 +636,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: description: |- This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching @@ -640,6 +659,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: description: |- If the affinity requirements specified by this field are not met at @@ -691,11 +711,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -710,12 +732,12 @@ spec: description: |- MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. 
The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string @@ -725,12 +747,12 @@ spec: description: |- MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. - Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
items: type: string @@ -772,11 +794,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -796,6 +820,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: description: |- This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching @@ -808,6 +833,7 @@ spec: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: description: Describes pod anti-affinity scheduling @@ -869,11 +895,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -888,12 +916,12 @@ spec: description: |- MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
items: type: string @@ -903,12 +931,12 @@ spec: description: |- MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. - Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
items: type: string @@ -951,11 +979,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -975,6 +1005,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: description: |- This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching @@ -997,6 +1028,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: description: |- If the anti-affinity requirements specified by this field are not met at @@ -1048,11 +1080,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -1067,12 +1101,12 @@ spec: description: |- MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
items: type: string @@ -1082,12 +1116,12 @@ spec: description: |- MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. - Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. items: type: string @@ -1129,11 +1163,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -1153,6 +1189,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: description: |- This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching @@ -1165,6 +1202,7 @@ spec: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object automountServiceAccountToken: @@ -1195,6 +1233,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic command: description: |- Entrypoint array. Not executed within a shell. @@ -1208,6 +1247,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic env: description: |- List of environment variables to set in the container. 
@@ -1244,10 +1284,15 @@ spec: description: The key to select. type: string name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the @@ -1314,10 +1359,15 @@ spec: secret key. type: string name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the @@ -1332,6 +1382,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map envFrom: description: |- List of sources to populate environment variables in the container. @@ -1348,10 +1401,15 @@ spec: description: The ConfigMap to select from properties: name: + default: "" description: |- Name of the referent. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the ConfigMap @@ -1368,10 +1426,15 @@ spec: description: The Secret to select from properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the Secret @@ -1381,6 +1444,7 @@ spec: x-kubernetes-map-type: atomic type: object type: array + x-kubernetes-list-type: atomic image: description: |- Container image name. @@ -1422,6 +1486,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: description: HTTPGet specifies the http @@ -1455,6 +1520,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. 
@@ -1539,6 +1605,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: description: HTTPGet specifies the http @@ -1572,6 +1639,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. @@ -1652,6 +1720,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: description: |- @@ -1712,6 +1781,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. @@ -1871,6 +1941,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: description: |- @@ -1931,6 +2002,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. @@ -2128,6 +2200,30 @@ spec: 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows. type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object capabilities: description: |- The capabilities to add/drop when running containers. 
@@ -2141,6 +2237,7 @@ spec: POSIX capabilities type type: string type: array + x-kubernetes-list-type: atomic drop: description: Removed capabilities items: @@ -2148,6 +2245,7 @@ spec: POSIX capabilities type type: string type: array + x-kubernetes-list-type: atomic type: object privileged: description: |- @@ -2307,6 +2405,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: description: |- @@ -2367,6 +2466,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. @@ -2510,6 +2610,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map volumeMounts: description: |- Pod volumes to mount into the container's filesystem. @@ -2529,6 +2632,8 @@ spec: to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). type: string name: description: This must match the Name @@ -2539,6 +2644,29 @@ spec: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + + If ReadOnly is false, this field has no meaning and must be unspecified. + + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. 
+ + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string subPath: description: |- Path within the volume from which the container's volume should be mounted. @@ -2556,6 +2684,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map workingDir: description: |- Container's working directory. @@ -2567,6 +2698,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map dnsConfig: description: |- Specifies the DNS parameters of a pod. @@ -2581,6 +2715,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic options: description: |- A list of DNS resolver options. @@ -2598,6 +2733,7 @@ spec: type: string type: object type: array + x-kubernetes-list-type: atomic searches: description: |- A list of DNS search domains for host-name lookup. @@ -2606,6 +2742,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object dnsPolicy: description: |- @@ -2653,6 +2790,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic command: description: |- Entrypoint array. Not executed within a shell. @@ -2666,6 +2804,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic env: description: |- List of environment variables to set in the container. @@ -2702,10 +2841,15 @@ spec: description: The key to select. type: string name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the @@ -2772,10 +2916,15 @@ spec: secret key. type: string name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the @@ -2790,6 +2939,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map envFrom: description: |- List of sources to populate environment variables in the container. @@ -2806,10 +2958,15 @@ spec: description: The ConfigMap to select from properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. 
type: string optional: description: Specify whether the ConfigMap @@ -2826,10 +2983,15 @@ spec: description: The Secret to select from properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the Secret @@ -2839,6 +3001,7 @@ spec: x-kubernetes-map-type: atomic type: object type: array + x-kubernetes-list-type: atomic image: description: |- Container image name. @@ -2877,6 +3040,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: description: HTTPGet specifies the http @@ -2910,6 +3074,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. @@ -2994,6 +3159,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: description: HTTPGet specifies the http @@ -3027,6 +3193,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. @@ -3104,6 +3271,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: description: |- @@ -3164,6 +3332,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. 
@@ -3313,6 +3482,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: description: |- @@ -3373,6 +3543,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. @@ -3557,6 +3728,30 @@ spec: 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows. type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object capabilities: description: |- The capabilities to add/drop when running containers. @@ -3570,6 +3765,7 @@ spec: POSIX capabilities type type: string type: array + x-kubernetes-list-type: atomic drop: description: Removed capabilities items: @@ -3577,6 +3773,7 @@ spec: POSIX capabilities type type: string type: array + x-kubernetes-list-type: atomic type: object privileged: description: |- @@ -3730,6 +3927,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: description: |- @@ -3790,6 +3988,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. 
@@ -3943,6 +4142,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map volumeMounts: description: |- Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. @@ -3962,6 +4164,8 @@ spec: to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). type: string name: description: This must match the Name @@ -3972,6 +4176,29 @@ spec: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + + If ReadOnly is false, this field has no meaning and must be unspecified. + + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. + + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string subPath: description: |- Path within the volume from which the container's volume should be mounted. @@ -3989,6 +4216,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map workingDir: description: |- Container's working directory. 
@@ -4000,10 +4230,13 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map hostAliases: description: |- HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts - file if specified. This is only valid for non-hostNetwork pods. + file if specified. items: description: |- HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the @@ -4014,11 +4247,17 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic ip: description: IP address of the host file entry. type: string + required: + - ip type: object type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map hostIPC: description: |- Use the host's ipc namespace. @@ -4063,14 +4302,24 @@ spec: referenced object inside the same namespace. properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string + required: + - name type: object x-kubernetes-map-type: atomic type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map initContainers: description: |- List of initialization containers belonging to the pod. @@ -4103,6 +4352,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic command: description: |- Entrypoint array. Not executed within a shell. 
@@ -4116,6 +4366,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic env: description: |- List of environment variables to set in the container. @@ -4152,10 +4403,15 @@ spec: description: The key to select. type: string name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the @@ -4222,10 +4478,15 @@ spec: secret key. type: string name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the @@ -4240,6 +4501,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map envFrom: description: |- List of sources to populate environment variables in the container. @@ -4256,10 +4520,15 @@ spec: description: The ConfigMap to select from properties: name: + default: "" description: |- Name of the referent. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the ConfigMap @@ -4276,10 +4545,15 @@ spec: description: The Secret to select from properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the Secret @@ -4289,6 +4563,7 @@ spec: x-kubernetes-map-type: atomic type: object type: array + x-kubernetes-list-type: atomic image: description: |- Container image name. @@ -4330,6 +4605,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: description: HTTPGet specifies the http @@ -4363,6 +4639,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. 
@@ -4447,6 +4724,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object httpGet: description: HTTPGet specifies the http @@ -4480,6 +4758,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. @@ -4560,6 +4839,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: description: |- @@ -4620,6 +4900,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. @@ -4779,6 +5060,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: description: |- @@ -4839,6 +5121,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. @@ -5036,6 +5319,30 @@ spec: 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows. type: boolean + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by this container. If set, this profile + overrides the pod's appArmorProfile. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object capabilities: description: |- The capabilities to add/drop when running containers. 
@@ -5049,6 +5356,7 @@ spec: POSIX capabilities type type: string type: array + x-kubernetes-list-type: atomic drop: description: Removed capabilities items: @@ -5056,6 +5364,7 @@ spec: POSIX capabilities type type: string type: array + x-kubernetes-list-type: atomic type: object privileged: description: |- @@ -5215,6 +5524,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: description: |- @@ -5275,6 +5585,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic path: description: Path to access on the HTTP server. @@ -5418,6 +5729,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map volumeMounts: description: |- Pod volumes to mount into the container's filesystem. @@ -5437,6 +5751,8 @@ spec: to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified + (which defaults to None). type: string name: description: This must match the Name @@ -5447,6 +5763,29 @@ spec: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + + + If ReadOnly is false, this field has no meaning and must be unspecified. + + + If ReadOnly is true, and this field is set to Disabled, the mount is not made + recursively read-only. If this field is set to IfPossible, the mount is made + recursively read-only, if it is supported by the container runtime. If this + field is set to Enabled, the mount is made recursively read-only if it is + supported by the container runtime, otherwise the pod will not be started and + an error will be generated to indicate the reason. 
+ + + If this field is set to IfPossible or Enabled, MountPropagation must be set to + None (or be unspecified, which defaults to None). + + + If this field is not specified, it is treated as an equivalent of Disabled. + type: string subPath: description: |- Path within the volume from which the container's volume should be mounted. @@ -5464,6 +5803,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map workingDir: description: |- Container's working directory. @@ -5475,6 +5817,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map nodeName: description: |- NodeName is a request to schedule this pod onto a specific node. If it is non-empty, @@ -5504,6 +5849,7 @@ spec: - spec.hostPID - spec.hostIPC - spec.hostUsers + - spec.securityContext.appArmorProfile - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup @@ -5513,6 +5859,7 @@ spec: - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups + - spec.containers[*].securityContext.appArmorProfile - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities @@ -5592,6 +5939,7 @@ spec: - conditionType type: object type: array + x-kubernetes-list-type: atomic resourceClaims: description: |- ResourceClaims defines which ResourceClaims must be allocated @@ -5678,9 +6026,6 @@ spec: SchedulingGates can only be set at pod creation time, and be removed only afterwards. - - - This is a beta feature enabled by the PodSchedulingReadiness feature gate. items: description: PodSchedulingGate is associated to a Pod to guard its scheduling. @@ -5702,6 +6047,29 @@ spec: SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. 
See type description for default values of each field. properties: + appArmorProfile: + description: |- + appArmorProfile is the AppArmor options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile loaded on the node that should be used. + The profile must be preconfigured on the node to work. + Must match the loaded name of the profile. + Must be set if and only if type is "Localhost". + type: string + type: + description: |- + type indicates which kind of AppArmor profile will be applied. + Valid options are: + Localhost - a profile pre-loaded on the node. + RuntimeDefault - the container runtime's default profile. + Unconfined - no AppArmor enforcement. + type: string + required: + - type + type: object fsGroup: description: |- A special supplemental group that applies to all containers in a pod. @@ -5821,6 +6189,7 @@ spec: format: int64 type: integer type: array + x-kubernetes-list-type: atomic sysctls: description: |- Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported @@ -5841,6 +6210,7 @@ spec: - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: description: |- The Windows specific settings applied to all containers. @@ -5876,7 +6246,7 @@ spec: type: object serviceAccount: description: |- - DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. + DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead. 
type: string serviceAccountName: @@ -5956,6 +6326,7 @@ spec: type: string type: object type: array + x-kubernetes-list-type: atomic topologySpreadConstraints: description: |- TopologySpreadConstraints describes how a group of pods ought to spread across topology @@ -5998,11 +6369,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -6073,9 +6446,6 @@ spec: In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. - - - This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). format: int32 type: integer nodeAffinityPolicy: @@ -6262,6 +6632,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic path: description: 'path is Optional: Used as the mounted root, rather than the full @@ -6284,10 +6655,15 @@ spec: More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -6323,10 +6699,15 @@ spec: to OpenStack. properties: name: + default: "" description: |- Name of the referent. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -6391,11 +6772,17 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: optional specify whether the @@ -6428,10 +6815,15 @@ spec: secret object contains more than one secret, all secret references are passed. properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -6478,7 +6870,8 @@ spec: fieldRef: description: 'Required: Selects a field of the pod: only annotations, - labels, name and namespace are supported.' + labels, name, namespace and uid + are supported.' properties: apiVersion: description: Version of the schema @@ -6544,6 +6937,7 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic type: object emptyDir: description: |- @@ -6666,6 +7060,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic dataSource: description: |- dataSource field can be used to specify either: @@ -6813,11 +7208,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -6845,7 +7242,7 @@ spec: If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. type: string volumeMode: @@ -6891,6 +7288,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic wwids: description: |- wwids Optional: FC volume world wide identifiers (wwids) @@ -6898,6 +7296,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic type: object flexVolume: description: |- @@ -6934,10 +7333,15 @@ spec: scripts. 
properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -7122,6 +7526,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic readOnly: description: |- readOnly here will force the ReadOnly setting in VolumeMounts. @@ -7132,10 +7537,15 @@ spec: for iSCSI target and initiator authentication properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -7318,11 +7728,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string @@ -7403,11 +7815,17 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" description: |- Name of the referent. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: optional specify @@ -7433,8 +7851,8 @@ spec: description: 'Required: Selects a field of the pod: only annotations, - labels, name and namespace - are supported.' + labels, name, namespace + and uid are supported.' properties: apiVersion: description: Version @@ -7508,6 +7926,7 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic type: object secret: description: secret information about @@ -7552,11 +7971,17 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. 
type: string optional: description: optional field specify @@ -7597,6 +8022,7 @@ spec: type: object type: object type: array + x-kubernetes-list-type: atomic type: object quobyte: description: quobyte represents a Quobyte mount @@ -7667,6 +8093,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic pool: description: |- pool is the rados pool name. @@ -7687,10 +8114,15 @@ spec: More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -7736,10 +8168,15 @@ spec: sensitive information. If this is not provided, Login operation will fail. properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. 
type: string type: object x-kubernetes-map-type: atomic @@ -7826,6 +8263,7 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic optional: description: optional field specify whether the Secret or its keys must be defined @@ -7858,10 +8296,15 @@ spec: credentials. If not specified, default values will be attempted. properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -7912,6 +8355,9 @@ spec: - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map required: - containers type: object @@ -7936,7 +8382,7 @@ spec: Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. - More info: http://kubernetes.io/docs/user-guide/annotations + More info: http:https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ type: object labels: additionalProperties: @@ -7946,7 +8392,7 @@ spec: (scope and select) objects. Labels will be merged with internal labels used by crossplane, and labels with a crossplane.io key might be overwritten. 
- More info: http://kubernetes.io/docs/user-guide/labels + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ type: object name: description: Name is the name of the object. @@ -7967,7 +8413,7 @@ spec: Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. - More info: http://kubernetes.io/docs/user-guide/annotations + More info: http:https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ type: object labels: additionalProperties: @@ -7977,7 +8423,7 @@ spec: (scope and select) objects. Labels will be merged with internal labels used by crossplane, and labels with a crossplane.io key might be overwritten. - More info: http://kubernetes.io/docs/user-guide/labels + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ type: object name: description: Name is the name of the object. diff --git a/cluster/crds/pkg.crossplane.io_functionrevisions.yaml b/cluster/crds/pkg.crossplane.io_functionrevisions.yaml index 6b269fd36..6951631a1 100644 --- a/cluster/crds/pkg.crossplane.io_functionrevisions.yaml +++ b/cluster/crds/pkg.crossplane.io_functionrevisions.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -38,10 +39,16 @@ spec: - jsonPath: .metadata.creationTimestamp name: AGE type: date - name: v1beta1 + name: v1 schema: openAPIV3Schema: - description: A FunctionRevision that has been added to Crossplane. + description: |- + A FunctionRevision represents a revision of a Function. Crossplane + creates new revisions when there are changes to the Function. + + + Crossplane creates and manages FunctionRevisions. Don't directly edit + FunctionRevisions. 
properties: apiVersion: description: |- @@ -70,7 +77,7 @@ spec: Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. - More info: http://kubernetes.io/docs/user-guide/labels + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ type: object controllerConfigRef: description: |- @@ -118,10 +125,15 @@ spec: referenced object inside the same namespace. properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -196,6 +208,13 @@ spec: A Message containing details about this condition's last transition from one status to another, if any. type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer reason: description: A Reason for this condition's last transition from one status to another. @@ -277,6 +296,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic nonResourceURLs: description: |- NonResourceURLs is a set of partial urls that a user should have access to. 
*s are allowed, but only as the full, final step in the path @@ -285,6 +305,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic resourceNames: description: ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything @@ -292,18 +313,21 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic resources: description: Resources is a list of resources this rule applies to. '*' represents all resources. items: type: string type: array + x-kubernetes-list-type: atomic verbs: description: Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs. items: type: string type: array + x-kubernetes-list-type: atomic required: - verbs type: object @@ -314,3 +338,324 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Healthy')].status + name: HEALTHY + type: string + - jsonPath: .spec.revision + name: REVISION + type: string + - jsonPath: .spec.image + name: IMAGE + type: string + - jsonPath: .spec.desiredState + name: STATE + type: string + - jsonPath: .status.foundDependencies + name: DEP-FOUND + type: string + - jsonPath: .status.installedDependencies + name: DEP-INSTALLED + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: |- + A FunctionRevision represents a revision of a Function. Crossplane + creates new revisions when there are changes to the Function. + + + Crossplane creates and manages FunctionRevisions. Don't directly edit + FunctionRevisions. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FunctionRevisionSpec specifies configuration for a FunctionRevision. + properties: + commonLabels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + type: object + controllerConfigRef: + description: |- + ControllerConfigRef references a ControllerConfig resource that will be + used to configure the packaged controller Deployment. + Deprecated: Use RuntimeConfigReference instead. + properties: + name: + description: Name of the ControllerConfig. + type: string + required: + - name + type: object + desiredState: + description: DesiredState of the PackageRevision. Can be either Active + or Inactive. + type: string + ignoreCrossplaneConstraints: + default: false + description: |- + IgnoreCrossplaneConstraints indicates to the package manager whether to + honor Crossplane version constrains specified by the package. + Default is false. + type: boolean + image: + description: Package image used by install Pod to extract package + contents. + type: string + packagePullPolicy: + default: IfNotPresent + description: |- + PackagePullPolicy defines the pull policy for the package. It is also + applied to any images pulled for the package, such as a provider's + controller image. + Default is IfNotPresent. 
+ type: string + packagePullSecrets: + description: |- + PackagePullSecrets are named secrets in the same namespace that can be + used to fetch packages from private registries. They are also applied to + any images pulled for the package, such as a provider's controller image. + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. + type: string + type: object + x-kubernetes-map-type: atomic + type: array + revision: + description: |- + Revision number. Indicates when the revision will be garbage collected + based on the parent's RevisionHistoryLimit. + format: int64 + type: integer + runtimeConfigRef: + default: + name: default + description: |- + RuntimeConfigRef references a RuntimeConfig resource that will be used + to configure the package runtime. + properties: + apiVersion: + default: pkg.crossplane.io/v1beta1 + description: API version of the referent. + type: string + kind: + default: DeploymentRuntimeConfig + description: Kind of the referent. + type: string + name: + description: Name of the RuntimeConfig. + type: string + required: + - name + type: object + skipDependencyResolution: + default: false + description: |- + SkipDependencyResolution indicates to the package manager whether to skip + resolving dependencies for a package. Setting this value to true may have + unintended consequences. + Default is false. 
+ type: boolean + tlsClientSecretName: + description: |- + TLSClientSecretName is the name of the TLS Secret that stores client + certificates of the Provider. + type: string + tlsServerSecretName: + description: |- + TLSServerSecretName is the name of the TLS Secret that stores server + certificates of the Provider. + type: string + required: + - desiredState + - image + - revision + type: object + status: + description: FunctionRevisionStatus represents the observed state of a + FunctionRevision. + properties: + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + endpoint: + description: |- + Endpoint is the gRPC endpoint where Crossplane will send + RunFunctionRequests. 
+ type: string + foundDependencies: + description: Dependency information. + format: int64 + type: integer + installedDependencies: + format: int64 + type: integer + invalidDependencies: + format: int64 + type: integer + objectRefs: + description: References to objects owned by PackageRevision. + items: + description: |- + A TypedReference refers to an object by Name, Kind, and APIVersion. It is + commonly used to reference cluster-scoped objects or objects where the + namespace is already known. + properties: + apiVersion: + description: APIVersion of the referenced object. + type: string + kind: + description: Kind of the referenced object. + type: string + name: + description: Name of the referenced object. + type: string + uid: + description: UID of the referenced object. + type: string + required: + - apiVersion + - kind + - name + type: object + type: array + permissionRequests: + description: |- + PermissionRequests made by this package. The package declares that its + controller needs these permissions to run. The RBAC manager is + responsible for granting them. + items: + description: |- + PolicyRule holds information that describes a policy rule, but does not contain information + about who the rule applies to or which namespace the rule applies to. + properties: + apiGroups: + description: |- + APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of + the enumerated resources in any API group will be allowed. "" represents the core API group and "*" represents all API groups. + items: + type: string + type: array + x-kubernetes-list-type: atomic + nonResourceURLs: + description: |- + NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path + Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. 
+ Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. + items: + type: string + type: array + x-kubernetes-list-type: atomic + resourceNames: + description: ResourceNames is an optional white list of names + that the rule applies to. An empty set means that everything + is allowed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + resources: + description: Resources is a list of resources this rule applies + to. '*' represents all resources. + items: + type: string + type: array + x-kubernetes-list-type: atomic + verbs: + description: Verbs is a list of Verbs that apply to ALL the + ResourceKinds contained in this rule. '*' represents all verbs. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - verbs + type: object + type: array + type: object + type: object + served: true + storage: false + subresources: + status: {} diff --git a/cluster/crds/pkg.crossplane.io_functions.yaml b/cluster/crds/pkg.crossplane.io_functions.yaml index 0753e10e2..76ed888fc 100644 --- a/cluster/crds/pkg.crossplane.io_functions.yaml +++ b/cluster/crds/pkg.crossplane.io_functions.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -29,11 +30,16 @@ spec: - jsonPath: .metadata.creationTimestamp name: AGE type: date - name: v1beta1 + name: v1 schema: openAPIV3Schema: - description: Function is the CRD type for a request to deploy a long-running - Function. + description: |- + A Function installs an OCI compatible Crossplane package, extending + Crossplane with support for a new kind of composition function. + + + Read the Crossplane documentation for + [more information about Functions](https://docs.crossplane.io/latest/concepts/composition-functions). 
properties: apiVersion: description: |- @@ -62,7 +68,7 @@ spec: Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. - More info: http://kubernetes.io/docs/user-guide/labels + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ type: object controllerConfigRef: description: |- @@ -102,10 +108,15 @@ spec: referenced object inside the same namespace. properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -176,6 +187,13 @@ spec: A Message containing details about this condition's last transition from one status to another, if any. type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer reason: description: A Reason for this condition's last transition from one status to another. 
@@ -220,3 +238,224 @@ spec: storage: true subresources: status: {} + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Installed')].status + name: INSTALLED + type: string + - jsonPath: .status.conditions[?(@.type=='Healthy')].status + name: HEALTHY + type: string + - jsonPath: .spec.package + name: PACKAGE + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: |- + A Function installs an OCI compatible Crossplane package, extending + Crossplane with support for a new kind of composition function. + + + Read the Crossplane documentation for + [more information about Functions](https://docs.crossplane.io/latest/concepts/composition-functions). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: FunctionSpec specifies the configuration of a Function. + properties: + commonLabels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + type: object + controllerConfigRef: + description: |- + ControllerConfigRef references a ControllerConfig resource that will be + used to configure the packaged controller Deployment. + Deprecated: Use RuntimeConfigReference instead. + properties: + name: + description: Name of the ControllerConfig. + type: string + required: + - name + type: object + ignoreCrossplaneConstraints: + default: false + description: |- + IgnoreCrossplaneConstraints indicates to the package manager whether to + honor Crossplane version constrains specified by the package. + Default is false. + type: boolean + package: + description: Package is the name of the package that is being requested. + type: string + packagePullPolicy: + default: IfNotPresent + description: |- + PackagePullPolicy defines the pull policy for the package. + Default is IfNotPresent. + type: string + packagePullSecrets: + description: |- + PackagePullSecrets are named secrets in the same namespace that can be used + to fetch packages from private registries. + items: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. 
+ type: string + type: object + x-kubernetes-map-type: atomic + type: array + revisionActivationPolicy: + default: Automatic + description: |- + RevisionActivationPolicy specifies how the package controller should + update from one revision to the next. Options are Automatic or Manual. + Default is Automatic. + type: string + revisionHistoryLimit: + default: 1 + description: |- + RevisionHistoryLimit dictates how the package controller cleans up old + inactive package revisions. + Defaults to 1. Can be disabled by explicitly setting to 0. + format: int64 + type: integer + runtimeConfigRef: + default: + name: default + description: |- + RuntimeConfigRef references a RuntimeConfig resource that will be used + to configure the package runtime. + properties: + apiVersion: + default: pkg.crossplane.io/v1beta1 + description: API version of the referent. + type: string + kind: + default: DeploymentRuntimeConfig + description: Kind of the referent. + type: string + name: + description: Name of the RuntimeConfig. + type: string + required: + - name + type: object + skipDependencyResolution: + default: false + description: |- + SkipDependencyResolution indicates to the package manager whether to skip + resolving dependencies for a package. Setting this value to true may have + unintended consequences. + Default is false. + type: boolean + required: + - package + type: object + status: + description: FunctionStatus represents the observed state of a Function. + properties: + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. 
+ type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + currentIdentifier: + description: |- + CurrentIdentifier is the most recent package source that was used to + produce a revision. The package manager uses this field to determine + whether to check for package updates for a given source when + packagePullPolicy is set to IfNotPresent. Manually removing this field + will cause the package manager to check that the current revision is + correct for the given package source. + type: string + currentRevision: + description: |- + CurrentRevision is the name of the current package revision. It will + reflect the most up to date revision, whether it has been activated or + not. 
+ type: string + type: object + type: object + served: true + storage: false + subresources: + status: {} diff --git a/cluster/crds/pkg.crossplane.io_locks.yaml b/cluster/crds/pkg.crossplane.io_locks.yaml index 4daff56a3..b55f731bf 100644 --- a/cluster/crds/pkg.crossplane.io_locks.yaml +++ b/cluster/crds/pkg.crossplane.io_locks.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: diff --git a/cluster/crds/pkg.crossplane.io_providerrevisions.yaml b/cluster/crds/pkg.crossplane.io_providerrevisions.yaml index a29d6c6b0..d1abe2dc1 100644 --- a/cluster/crds/pkg.crossplane.io_providerrevisions.yaml +++ b/cluster/crds/pkg.crossplane.io_providerrevisions.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -41,7 +42,13 @@ spec: name: v1 schema: openAPIV3Schema: - description: A ProviderRevision that has been added to Crossplane. + description: |- + A ProviderRevision represents a revision of a Provider. Crossplane + creates new revisions when there are changes to a Provider. + + + Crossplane creates and manages ProviderRevisions. Don't directly edit + ProviderRevisions. properties: apiVersion: description: |- @@ -70,7 +77,7 @@ spec: Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. - More info: http://kubernetes.io/docs/user-guide/labels + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ type: object controllerConfigRef: description: |- @@ -118,10 +125,15 @@ spec: referenced object inside the same namespace. properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -196,6 +208,13 @@ spec: A Message containing details about this condition's last transition from one status to another, if any. type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer reason: description: A Reason for this condition's last transition from one status to another. @@ -272,6 +291,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic nonResourceURLs: description: |- NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path @@ -280,6 +300,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic resourceNames: description: ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything @@ -287,18 +308,21 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic resources: description: Resources is a list of resources this rule applies to. '*' represents all resources. items: type: string type: array + x-kubernetes-list-type: atomic verbs: description: Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs. 
items: type: string type: array + x-kubernetes-list-type: atomic required: - verbs type: object diff --git a/cluster/crds/pkg.crossplane.io_providers.yaml b/cluster/crds/pkg.crossplane.io_providers.yaml index da22951e9..79b33ab12 100644 --- a/cluster/crds/pkg.crossplane.io_providers.yaml +++ b/cluster/crds/pkg.crossplane.io_providers.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -32,7 +33,13 @@ spec: name: v1 schema: openAPIV3Schema: - description: Provider is the CRD type for a request to add a provider to Crossplane. + description: |- + A Provider installs an OCI compatible Crossplane package, extending + Crossplane with support for new kinds of managed resources. + + + Read the Crossplane documentation for + [more information about Providers](https://docs.crossplane.io/latest/concepts/providers). properties: apiVersion: description: |- @@ -63,7 +70,7 @@ spec: Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. - More info: http://kubernetes.io/docs/user-guide/labels + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ type: object controllerConfigRef: description: |- @@ -103,10 +110,15 @@ spec: referenced object inside the same namespace. properties: name: + default: "" description: |- Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. 
type: string type: object x-kubernetes-map-type: atomic @@ -177,6 +189,13 @@ spec: A Message containing details about this condition's last transition from one status to another, if any. type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer reason: description: A Reason for this condition's last transition from one status to another. diff --git a/cluster/crds/secrets.crossplane.io_storeconfigs.yaml b/cluster/crds/secrets.crossplane.io_storeconfigs.yaml index 3ebc1d723..85ffc16a6 100644 --- a/cluster/crds/secrets.crossplane.io_storeconfigs.yaml +++ b/cluster/crds/secrets.crossplane.io_storeconfigs.yaml @@ -1,3 +1,4 @@ +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -29,8 +30,9 @@ spec: name: v1alpha1 schema: openAPIV3Schema: - description: A StoreConfig configures how Crossplane controllers should store - connection details. + description: |- + A StoreConfig configures how Crossplane controllers should store connection + details in an external secret store. 
properties: apiVersion: description: |- diff --git a/cluster/images/crossplane/Dockerfile b/cluster/images/crossplane/Dockerfile deleted file mode 100644 index d0a497135..000000000 --- a/cluster/images/crossplane/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM gcr.io/distroless/static@sha256:9be3fcc6abeaf985b5ecce59451acbcbb15e7be39472320c538d0d55a0834edc - -ARG TARGETOS -ARG TARGETARCH - -COPY bin/$TARGETOS\_$TARGETARCH/crossplane /usr/local/bin/ -COPY crds /crds -COPY webhookconfigurations /webhookconfigurations -EXPOSE 8080 -USER 65532 -ENTRYPOINT ["crossplane"] diff --git a/cluster/images/crossplane/Makefile b/cluster/images/crossplane/Makefile deleted file mode 100755 index 72f26d2d6..000000000 --- a/cluster/images/crossplane/Makefile +++ /dev/null @@ -1,37 +0,0 @@ -# ==================================================================================== -# Setup Project - -include ../../../build/makelib/common.mk - -# ==================================================================================== -# Options - -include ../../../build/makelib/imagelight.mk - -# ==================================================================================== -# Targets - -img.build: - @$(INFO) docker build $(IMAGE) - @$(MAKE) BUILD_ARGS="--load" img.build.shared - @$(OK) docker build $(IMAGE) - -img.publish: - @$(INFO) docker publish $(IMAGE) - @$(MAKE) BUILD_ARGS="--push" img.build.shared - @$(OK) docker publish $(IMAGE) - -img.build.shared: - @cp Dockerfile $(IMAGE_TEMP_DIR) || $(FAIL) - @cp -r $(OUTPUT_DIR)/bin/ $(IMAGE_TEMP_DIR)/bin || $(FAIL) - @cp -a ../../../cluster/crds $(IMAGE_TEMP_DIR) || $(FAIL) - @cp -a ../../../cluster/webhookconfigurations $(IMAGE_TEMP_DIR) || $(FAIL) - @docker buildx build $(BUILD_ARGS) \ - --platform $(IMAGE_PLATFORMS) \ - -t $(IMAGE) \ - $(IMAGE_TEMP_DIR) || $(FAIL) - -img.promote: - @$(INFO) docker promote $(FROM_IMAGE) to $(TO_IMAGE) - @docker buildx imagetools create -t $(TO_IMAGE) $(FROM_IMAGE) - @$(OK) docker promote $(FROM_IMAGE) to 
$(TO_IMAGE) diff --git a/cluster/kustomization.yaml b/cluster/kustomization.yaml deleted file mode 100644 index 3cfd869a4..000000000 --- a/cluster/kustomization.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# This kustomization can be used to remotely install all Crossplane CRDs -# by running kubectl apply -k https://github.com/crossplane/crossplane//cluster?ref=master -resources: -- crds/apiextensions.crossplane.io_compositeresourcedefinitions.yaml -- crds/apiextensions.crossplane.io_compositionrevisions.yaml -- crds/apiextensions.crossplane.io_compositions.yaml -- crds/apiextensions.crossplane.io_environmentconfigs.yaml -- crds/apiextensions.crossplane.io_usages.yaml -- crds/pkg.crossplane.io_configurationrevisions.yaml -- crds/pkg.crossplane.io_configurations.yaml -- crds/pkg.crossplane.io_controllerconfigs.yaml -- crds/pkg.crossplane.io_deploymentruntimeconfigs.yaml -- crds/pkg.crossplane.io_functionrevisions.yaml -- crds/pkg.crossplane.io_functions.yaml -- crds/pkg.crossplane.io_locks.yaml -- crds/pkg.crossplane.io_providerrevisions.yaml -- crds/pkg.crossplane.io_providers.yaml -- crds/secrets.crossplane.io_storeconfigs.yaml diff --git a/cluster/local/README.md b/cluster/local/README.md deleted file mode 100644 index 53e098138..000000000 --- a/cluster/local/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# Deploying Crossplane Locally - -This directory contains scripts that automate common local development flows for -Crossplane, allowing you to deploy your local build of Crossplane to a `kind` -cluster. Run [kind.sh](./kind.sh) to setup a single-node kind Kubernetes -cluster. diff --git a/cluster/local/kind.sh b/cluster/local/kind.sh deleted file mode 100755 index 1ab270076..000000000 --- a/cluster/local/kind.sh +++ /dev/null @@ -1,103 +0,0 @@ -#!/usr/bin/env bash - -set -e - -scriptdir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" - -# shellcheck disable=SC1090 -projectdir="${scriptdir}/../.." 
- -# get the build environment variables from the special build.vars target in the main makefile -eval $(make --no-print-directory -C ${scriptdir}/../.. build.vars) - -# ensure the tools we need are installed -make ${KIND} ${KUBECTL} ${HELM3} - -BUILD_IMAGE="${BUILD_REGISTRY}/${PROJECT_NAME}-${TARGETARCH}" -DEFAULT_NAMESPACE="crossplane-system" - -function copy_image_to_cluster() { - local build_image=$1 - local final_image=$2 - local kind_name=$3 - docker tag "${build_image}" "${final_image}" - ${KIND} --name "${kind_name}" load docker-image "${final_image}" - echo "Tagged image: ${final_image}" -} - -# Deletes pods with application prefix. Namespace is expected as the first argument -function delete_pods() { - for pod in $(kubectl get pods -n "$2" -l "app=$1" --no-headers -o custom-columns=NAME:.metadata.name); do - kubectl delete pod "$pod" -n "$2" - done -} - -# current kubectl context == kind-kind, returns boolean -function check_context() { - if [ "$(kubectl config view 2>/dev/null | awk '/current-context/ {print $NF}')" = "kind-kind" ]; then - return 0 - fi - - return 1 -} - -# configure kind -KIND_NAME=${KIND_NAME:-"kind"} -IMAGE_REPOSITORY="xpkg.upbound.io/upbound/${PROJECT_NAME}" -case "${1:-}" in - up) - ${KIND} create cluster --name "${KIND_NAME}" --image "${KUBE_IMAGE}" --wait 5m - ;; - update) - helm_tag="$(cat _output/version)" - copy_image_to_cluster ${BUILD_IMAGE} "${IMAGE_REPOSITORY}:${helm_tag}" "${KIND_NAME}" - ;; - restart) - if check_context; then - [ "$2" ] && ns=$2 || ns="${DEFAULT_NAMESPACE}" - echo "Restarting \"${PROJECT_NAME}\" deployment pods in \"$ns\" namespace." - delete_pods ${PROJECT_NAME} ${ns} - else - echo "To prevent accidental data loss acting only on 'kind-kind' context. No action is taken." 
- fi - ;; - helm-install) - echo "copying image for helm" - helm_tag="$(cat _output/version)" - copy_image_to_cluster ${BUILD_IMAGE} "${IMAGE_REPOSITORY}:${helm_tag}" "${KIND_NAME}" - - [ "$2" ] && ns=$2 || ns="${DEFAULT_NAMESPACE}" - echo "installing helm package into \"$ns\" namespace" - ${HELM3} install ${PROJECT_NAME} --namespace ${ns} --create-namespace ${projectdir}/cluster/charts/${PROJECT_NAME} --set image.pullPolicy=Never,imagePullSecrets='',image.tag="${helm_tag}" --set args='{"--debug"}' ${HELM3_FLAGS} - ;; - helm-upgrade) - echo "copying image for helm" - helm_tag="$(cat _output/version)" - copy_image_to_cluster ${BUILD_IMAGE} "${IMAGE_REPOSITORY}:${helm_tag}" "${KIND_NAME}" - - [ "$2" ] && ns=$2 || ns="${DEFAULT_NAMESPACE}" - echo "upgrading helm package in \"$ns\" namespace" - ${HELM3} upgrade --install --namespace ${ns} --create-namespace ${PROJECT_NAME} ${projectdir}/cluster/charts/${PROJECT_NAME} --set image.pullPolicy=Never,imagePullSecrets='',image.tag=${helm_tag} --set args='{"--debug"}' ${HELM3_FLAGS} - ;; - helm-delete) - [ "$2" ] && ns=$2 || ns="${DEFAULT_NAMESPACE}" - echo "removing helm package from \"$ns\" namespace" - ${HELM3} uninstall --namespace ${ns} ${PROJECT_NAME} - ;; - helm-list) - ${HELM3} list --all --all-namespaces - ;; - clean) - ${KIND} --name "${KIND_NAME}" delete cluster - ;; - *) - echo "usage:" >&2 - echo " $0 up - create a new kind cluster" >&2 - echo " $0 clean - delete the kind cluster" >&2 - echo " $0 update - push project docker images to kind cluster registry" >&2 - echo " $0 restart project deployment pod(s) in specified namespace [default: \"${DEFAULT_NAMESPACE}\"]" >&2 - echo " $0 helm-install package(s) into provided namespace [default: \"${DEFAULT_NAMESPACE}\"]" >&2 - echo " $0 helm-upgrade - deploy the latest docker images and helm charts to kind cluster" >&2 - echo " $0 helm-delete package(s)" >&2 - echo " $0 helm-list all package(s)" >&2 -esac diff --git a/cluster/meta/meta.pkg.crossplane.io_providers.yaml 
b/cluster/meta/meta.pkg.crossplane.io_providers.yaml index 69bf74a19..b7997e5a9 100644 --- a/cluster/meta/meta.pkg.crossplane.io_providers.yaml +++ b/cluster/meta/meta.pkg.crossplane.io_providers.yaml @@ -62,6 +62,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic nonResourceURLs: description: |- NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path @@ -70,6 +71,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic resourceNames: description: ResourceNames is an optional white list of names that the rule applies to. An empty set means that @@ -77,12 +79,14 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic resources: description: Resources is a list of resources this rule applies to. '*' represents all resources. items: type: string type: array + x-kubernetes-list-type: atomic verbs: description: Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents @@ -90,6 +94,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - verbs type: object @@ -186,6 +191,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic nonResourceURLs: description: |- NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path @@ -194,6 +200,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic resourceNames: description: ResourceNames is an optional white list of names that the rule applies to. An empty set means that @@ -201,12 +208,14 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic resources: description: Resources is a list of resources this rule applies to. '*' represents all resources. 
items: type: string type: array + x-kubernetes-list-type: atomic verbs: description: Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents @@ -214,6 +223,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - verbs type: object diff --git a/cmd/crank/beta/beta.go b/cmd/crank/beta/beta.go index 9f027f0e2..1d3540abf 100644 --- a/cmd/crank/beta/beta.go +++ b/cmd/crank/beta/beta.go @@ -21,11 +21,9 @@ package beta import ( "github.com/crossplane/crossplane/cmd/crank/beta/convert" - "github.com/crossplane/crossplane/cmd/crank/beta/render" "github.com/crossplane/crossplane/cmd/crank/beta/top" "github.com/crossplane/crossplane/cmd/crank/beta/trace" "github.com/crossplane/crossplane/cmd/crank/beta/validate" - "github.com/crossplane/crossplane/cmd/crank/beta/xpkg" ) // Cmd contains beta commands. @@ -33,10 +31,8 @@ type Cmd struct { // Subcommands and flags will appear in the CLI help output in the same // order they're specified here. Keep them in alphabetical order. Convert convert.Cmd `cmd:"" help:"Convert a Crossplane resource to a newer version or kind."` - Render render.Cmd `cmd:"" help:"Render a composite resource (XR)."` Top top.Cmd `cmd:"" help:"Display resource (CPU/memory) usage by Crossplane related pods."` Trace trace.Cmd `cmd:"" help:"Trace a Crossplane resource to get a detailed output of its relationships, helpful for troubleshooting."` - XPKG xpkg.Cmd `cmd:"" help:"Manage Crossplane packages."` Validate validate.Cmd `cmd:"" help:"Validate Crossplane resources."` } diff --git a/cmd/crank/beta/convert/deploymentruntime/cmd.go b/cmd/crank/beta/convert/deploymentruntime/cmd.go index 4c3bf4a5a..f1ada34f7 100644 --- a/cmd/crank/beta/convert/deploymentruntime/cmd.go +++ b/cmd/crank/beta/convert/deploymentruntime/cmd.go @@ -34,10 +34,10 @@ import ( // Cmd arguments and flags for convert deployment-runtime subcommand. type Cmd struct { // Arguments. 
- InputFile string `arg:"" type:"path" optional:"" default:"-" help:"The ControllerConfig file to be Converted. If not specified or '-', stdin will be used."` + InputFile string `arg:"" default:"-" help:"The ControllerConfig file to be Converted. If not specified or '-', stdin will be used." optional:"" type:"path"` // Flags. - OutputFile string `short:"o" type:"path" placeholder:"PATH" help:"The file to write the generated DeploymentRuntimeConfig to. If not specified, stdout will be used."` + OutputFile string `help:"The file to write the generated DeploymentRuntimeConfig to. If not specified, stdout will be used." placeholder:"PATH" short:"o" type:"path"` fs afero.Fs } diff --git a/cmd/crank/beta/convert/deploymentruntime/converter.go b/cmd/crank/beta/convert/deploymentruntime/converter.go index bc3a35931..f5efb558b 100644 --- a/cmd/crank/beta/convert/deploymentruntime/converter.go +++ b/cmd/crank/beta/convert/deploymentruntime/converter.go @@ -18,7 +18,6 @@ package deploymentruntime import ( "errors" - "time" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -30,16 +29,14 @@ import ( ) const ( - // default container name that XP uses + // default container name that XP uses. runtimeContainerName = "package-runtime" errNilControllerConfig = "ControllerConfig is nil" ) -var timeNow = time.Now() - // controllerConfigToDeploymentRuntimeConfig converts a ControllerConfig to -// a DeploymentRuntimeConfig +// a DeploymentRuntimeConfig. func controllerConfigToDeploymentRuntimeConfig(cc *v1alpha1.ControllerConfig) (*v1beta1.DeploymentRuntimeConfig, error) { if cc == nil { return nil, errors.New(errNilControllerConfig) @@ -49,7 +46,7 @@ func controllerConfigToDeploymentRuntimeConfig(cc *v1alpha1.ControllerConfig) (* withName(cc.Name), // set the creation timestamp due to https://github.com/kubernetes/kubernetes/issues/109427 // to be removed when fixed. 
k8s apply ignores this field - withCreationTimestamp(metav1.NewTime(timeNow)), + withCreationTimestamp(metav1.Now()), withServiceAccountTemplate(cc), withServiceTemplate(cc), withDeploymentTemplate(dt), @@ -57,7 +54,7 @@ func controllerConfigToDeploymentRuntimeConfig(cc *v1alpha1.ControllerConfig) (* return drc, nil } -func deploymentTemplateFromControllerConfig(cc *v1alpha1.ControllerConfig) *v1beta1.DeploymentTemplate { //nolint:gocyclo // Just a lot of if, then set field +func deploymentTemplateFromControllerConfig(cc *v1alpha1.ControllerConfig) *v1beta1.DeploymentTemplate { if cc == nil || !shouldCreateDeploymentTemplate(cc) { return nil } @@ -80,7 +77,7 @@ func deploymentTemplateFromControllerConfig(cc *v1alpha1.ControllerConfig) *v1be // set the creation timestamp due to https://github.com/kubernetes/kubernetes/issues/109427 // to be removed when fixed. k8s apply ignores this field if cc.CreationTimestamp.IsZero() || dt.Spec.Template.ObjectMeta.CreationTimestamp.IsZero() { - dt.Spec.Template.ObjectMeta.CreationTimestamp = metav1.NewTime(timeNow) + dt.Spec.Template.ObjectMeta.CreationTimestamp = metav1.Now() } if cc.Spec.Metadata != nil { @@ -135,7 +132,7 @@ func deploymentTemplateFromControllerConfig(cc *v1alpha1.ControllerConfig) *v1be return dt } -func containerFromControllerConfig(cc *v1alpha1.ControllerConfig) *corev1.Container { //nolint:gocyclo // Just a lot of if, then set field +func containerFromControllerConfig(cc *v1alpha1.ControllerConfig) *corev1.Container { if cc == nil || !shouldCreateDeploymentTemplateContainer(cc) { return nil } @@ -165,8 +162,7 @@ func containerFromControllerConfig(cc *v1alpha1.ControllerConfig) *corev1.Contai c.Env = append(c.Env, cc.Spec.Env...) } if len(cc.Spec.VolumeMounts) > 0 { - c.VolumeMounts = - append(c.VolumeMounts, cc.Spec.VolumeMounts...) + c.VolumeMounts = append(c.VolumeMounts, cc.Spec.VolumeMounts...) 
} if cc.Spec.ResourceRequirements != nil { c.Resources = *cc.Spec.ResourceRequirements.DeepCopy() @@ -235,8 +231,8 @@ func withDeploymentTemplate(dt *v1beta1.DeploymentTemplate) func(*v1beta1.Deploy } // shouldCreateDeploymentTemplate determines whether we should create a deployment -// template in the DeploymentRuntimeConfig -func shouldCreateDeploymentTemplate(cc *v1alpha1.ControllerConfig) bool { //nolint:gocyclo // There are a lot of triggers for this, but it's not complex +// template in the DeploymentRuntimeConfig. +func shouldCreateDeploymentTemplate(cc *v1alpha1.ControllerConfig) bool { return len(cc.Labels) > 0 || len(cc.Annotations) > 0 || cc.Spec.Metadata != nil || @@ -255,7 +251,7 @@ func shouldCreateDeploymentTemplate(cc *v1alpha1.ControllerConfig) bool { //noli } // shouldCreateDeploymentTemplateContainer determines whether we should create a container -// entry in the DeploymentRuntimeConfig +// entry in the DeploymentRuntimeConfig. func shouldCreateDeploymentTemplateContainer(cc *v1alpha1.ControllerConfig) bool { return cc.Spec.Image != nil || cc.Spec.ImagePullPolicy != nil || diff --git a/cmd/crank/beta/convert/deploymentruntime/converter_test.go b/cmd/crank/beta/convert/deploymentruntime/converter_test.go index af85ebde7..268db1701 100644 --- a/cmd/crank/beta/convert/deploymentruntime/converter_test.go +++ b/cmd/crank/beta/convert/deploymentruntime/converter_test.go @@ -79,12 +79,13 @@ func TestNewDeploymentTemplateFromControllerConfig(t *testing.T) { Affinity: &corev1.Affinity{ NodeAffinity: &corev1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ - NodeSelectorTerms: []corev1.NodeSelectorTerm{{ - MatchFields: []corev1.NodeSelectorRequirement{ - {Key: "xplane"}, + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchFields: []corev1.NodeSelectorRequirement{ + {Key: "xplane"}, + }, }, }, - }, }, }, }, @@ -133,31 +134,33 @@ func TestNewDeploymentTemplateFromControllerConfig(t *testing.T) { Affinity: 
&corev1.Affinity{ NodeAffinity: &corev1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ - NodeSelectorTerms: []corev1.NodeSelectorTerm{{ - MatchFields: []corev1.NodeSelectorRequirement{ - {Key: "xplane"}, + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchFields: []corev1.NodeSelectorRequirement{ + {Key: "xplane"}, + }, }, }, - }, }, }, }, - Containers: []corev1.Container{{ - Name: "package-runtime", - Args: []string{"- -d", "- --enable-management-policies"}, - Image: image, - Resources: corev1.ResourceRequirements{ - Limits: map[corev1.ResourceName]resource.Quantity{ - "cpu": *resource.NewMilliQuantity(5000, resource.DecimalSI), - "memory": *resource.NewQuantity(10*1024*1024*1024, resource.BinarySI), - }, - Requests: map[corev1.ResourceName]resource.Quantity{ - "cpu": *resource.NewMilliQuantity(1500, resource.DecimalSI), - "memory": *resource.NewQuantity(5*1024*1024*1024, resource.BinarySI), + Containers: []corev1.Container{ + { + Name: "package-runtime", + Args: []string{"- -d", "- --enable-management-policies"}, + Image: image, + Resources: corev1.ResourceRequirements{ + Limits: map[corev1.ResourceName]resource.Quantity{ + "cpu": *resource.NewMilliQuantity(5000, resource.DecimalSI), + "memory": *resource.NewQuantity(10*1024*1024*1024, resource.BinarySI), + }, + Requests: map[corev1.ResourceName]resource.Quantity{ + "cpu": *resource.NewMilliQuantity(1500, resource.DecimalSI), + "memory": *resource.NewQuantity(5*1024*1024*1024, resource.BinarySI), + }, }, + VolumeMounts: []corev1.VolumeMount{{Name: "mount1", MountPath: "/tmp"}, {Name: "mount2", MountPath: "/etc/ssl/certs"}}, }, - VolumeMounts: []corev1.VolumeMount{{Name: "mount1", MountPath: "/tmp"}, {Name: "mount2", MountPath: "/etc/ssl/certs"}}, - }, }, ImagePullSecrets: []corev1.LocalObjectReference{{Name: "my-secret"}}, @@ -284,7 +287,8 @@ func TestControllerConfigToRuntimeDeploymentConfig(t *testing.T) { Labels: map[string]string{}, CreationTimestamp: timeNow, }, - 
}}, + }, + }, }, }, }, @@ -323,7 +327,6 @@ func TestNewContainerFromControllerConfig(t *testing.T) { args args want want }{ - "NilControllerConfig": { reason: "Correctly return an empty container", args: args{ @@ -378,7 +381,6 @@ func TestNewContainerFromControllerConfig(t *testing.T) { if diff := cmp.Diff(tc.want.c, c, cmpopts.EquateApproxTime(time.Second*2)); diff != "" { t.Errorf("%s\ncontainerFromControllerConfig(...): -want i, +got i:\n%s", tc.reason, diff) } - }) } } diff --git a/cmd/crank/beta/convert/io/io.go b/cmd/crank/beta/convert/io/io.go index 0a3ba55dc..22ae64818 100644 --- a/cmd/crank/beta/convert/io/io.go +++ b/cmd/crank/beta/convert/io/io.go @@ -54,7 +54,7 @@ func WriteObjectYAML(fs afero.Fs, outputFile string, o runtime.Object) error { var output io.Writer if outputFile != "" { - f, err := fs.OpenFile(outputFile, os.O_CREATE|os.O_WRONLY, 0644) + f, err := fs.OpenFile(outputFile, os.O_CREATE|os.O_WRONLY, 0o644) if err != nil { return errors.Wrap(err, "Unable to open output file") } diff --git a/cmd/crank/beta/convert/pipelinecomposition/cmd.go b/cmd/crank/beta/convert/pipelinecomposition/cmd.go index 236d43a40..7dd91a101 100644 --- a/cmd/crank/beta/convert/pipelinecomposition/cmd.go +++ b/cmd/crank/beta/convert/pipelinecomposition/cmd.go @@ -32,11 +32,11 @@ import ( // Cmd arguments and flags for converting a patch-and-transform to a function pipeline composition. type Cmd struct { // Arguments. - InputFile string `arg:"" type:"path" optional:"" default:"-" help:"The Composition file to be converted. If not specified or '-', stdin will be used."` + InputFile string `arg:"" default:"-" help:"The Composition file to be converted. If not specified or '-', stdin will be used." optional:"" type:"path"` // Flags. - OutputFile string `short:"o" type:"path" placeholder:"PATH" help:"The file to write the generated Composition to. 
If not specified, stdout will be used."` - FunctionName string `short:"f" type:"string" placeholder:"STRING" help:"FunctionRefName. Defaults to function-patch-and-transform."` + OutputFile string `help:"The file to write the generated Composition to. If not specified, stdout will be used." placeholder:"PATH" short:"o" type:"path"` + FunctionName string `help:"FunctionRefName. Defaults to function-patch-and-transform." placeholder:"STRING" short:"f" type:"string"` fs afero.Fs } diff --git a/cmd/crank/beta/convert/pipelinecomposition/converter.go b/cmd/crank/beta/convert/pipelinecomposition/converter.go index 8e39413ab..174c1e5c5 100644 --- a/cmd/crank/beta/convert/pipelinecomposition/converter.go +++ b/cmd/crank/beta/convert/pipelinecomposition/converter.go @@ -27,6 +27,8 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/utils/ptr" + commonv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" ) @@ -93,7 +95,7 @@ func convertPnTToPipeline(c *v1.Composition, functionRefName string) (*v1.Compos } // Override function name if provided - var fr = v1.FunctionReference{Name: defaultFunctionRefName} + fr := v1.FunctionReference{Name: defaultFunctionRefName} if functionRefName != "" { fr.Name = functionRefName } @@ -113,9 +115,9 @@ func convertPnTToPipeline(c *v1.Composition, functionRefName string) (*v1.Compos } // processFunctionInput populates any missing fields in the input to the function -// that are required by the function but were optional in the built-in engine +// that are required by the function but were optional in the built-in engine. 
func processFunctionInput(input *Input) *runtime.RawExtension { - var processedInput = &Input{} + processedInput := &Input{} // process Environment Patches if input.Environment != nil && len(input.Environment.Patches) > 0 { @@ -143,12 +145,12 @@ func processFunctionInput(input *Input) *runtime.RawExtension { processedInput.Resources = processedResources // Wrap the input in a RawExtension - var inputType = map[string]any{ + inputType := map[string]any{ "apiVersion": "pt.fn.crossplane.io/v1beta1", "kind": "Resources", - "environment": processedInput.Environment.DeepCopy(), - "patchSets": processedInput.PatchSets, - "resources": processedInput.Resources, + "environment": MigratePatchPolicyInEnvironment(processedInput.Environment.DeepCopy()), + "patchSets": MigratePatchPolicyInPatchSets(processedInput.PatchSets), + "resources": MigratePatchPolicyInResources(processedInput.Resources), } return &runtime.RawExtension{ @@ -156,6 +158,143 @@ func processFunctionInput(input *Input) *runtime.RawExtension { } } +// MigratePatchPolicyInResources processes all the patches in the given resources to migrate their patch policies. +func MigratePatchPolicyInResources(resources []v1.ComposedTemplate) []ComposedTemplate { + composedTemplates := []ComposedTemplate{} + + for _, resource := range resources { + composedTemplate := ComposedTemplate{} + composedTemplate.ComposedTemplate = resource + composedTemplate.Patches = migratePatches(resource.Patches) + // Conversion function above overrides the patches in the new type, + // so after conversion we set the underlying patches to nil to make sure + // there's no conflict in the serialized output. + composedTemplate.ComposedTemplate.Patches = nil + composedTemplates = append(composedTemplates, composedTemplate) + } + return composedTemplates +} + +// MigratePatchPolicyInPatchSets processes all the patches in the given patch set to migrate their patch policies. 
+func MigratePatchPolicyInPatchSets(patchset []v1.PatchSet) []PatchSet { + newPatchSets := []PatchSet{} + + for _, patchSet := range patchset { + newpatchset := PatchSet{} + newpatchset.Name = patchSet.Name + newpatchset.Patches = migratePatches(patchSet.Patches) + + newPatchSets = append(newPatchSets, newpatchset) + } + + return newPatchSets +} + +// MigratePatchPolicyInEnvironment processes all the patches in the given +// environment configuration to migrate their patch policies. +func MigratePatchPolicyInEnvironment(ec *v1.EnvironmentConfiguration) *Environment { + if ec == nil || len(ec.Patches) == 0 { + return nil + } + + return &Environment{ + Patches: migrateEnvPatches(ec.Patches), + } +} + +func migratePatches(patches []v1.Patch) []Patch { + newPatches := []Patch{} + + for _, patch := range patches { + newpatch := Patch{} + newpatch.Patch = patch + + if patch.Policy != nil { + newpatch.Policy = migratePatchPolicy(patch.Policy) + // Conversion function above overrides the patch policy in the new type, + // so after conversion we set underlying policy to nil to make sure + // there's no conflict in the serialized output. + newpatch.Patch.Policy = nil + } + + newPatches = append(newPatches, newpatch) + } + + return newPatches +} + +func migrateEnvPatches(envPatches []v1.EnvironmentPatch) []EnvironmentPatch { + newEnvPatches := []EnvironmentPatch{} + + for _, envPatch := range envPatches { + newEnvPatch := EnvironmentPatch{} + newEnvPatch.EnvironmentPatch = envPatch + + if envPatch.Policy != nil { + newEnvPatch.Policy = migratePatchPolicy(envPatch.Policy) + // Conversion function above overrides the patch policy in the new type, + // so after conversion we set underlying policy to nil to make sure + // there's no conflict in the serialized output. 
+ newEnvPatch.EnvironmentPatch.Policy = nil + } + + newEnvPatches = append(newEnvPatches, newEnvPatch) + } + + return newEnvPatches +} + +func migratePatchPolicy(policy *v1.PatchPolicy) *PatchPolicy { + to := migrateMergeOptions(policy.MergeOptions) + + if to == nil && policy.FromFieldPath == nil { + // neither To nor From has been set, just return nil to use defaults for + // everything + return nil + } + + return &PatchPolicy{ + FromFieldPath: policy.FromFieldPath, + ToFieldPath: to, + } +} + +// migrateMergeOptions implements the conversion of mergeOptions to the new +// toFieldPath policy. The conversion logic is described in +// https://github.com/crossplane-contrib/function-patch-and-transform/?tab=readme-ov-file#mergeoptions-replaced-by-tofieldpath. +func migrateMergeOptions(mo *commonv1.MergeOptions) *ToFieldPathPolicy { + if mo == nil { + // No merge options at all, default to nil which will mean Replace + return nil + } + + if isTrue(mo.KeepMapValues) { + if isNilOrFalse(mo.AppendSlice) { + // { appendSlice: nil/false, keepMapValues: true} + return ptr.To(ToFieldPathPolicyMergeObjects) + } + + // { appendSlice: true, keepMapValues: true } + return ptr.To(ToFieldPathPolicyMergeObjectsAppendArrays) + } + + if isTrue(mo.AppendSlice) { + // { appendSlice: true, keepMapValues: nil/false } + return ptr.To(ToFieldPathPolicyForceMergeObjectsAppendArrays) + } + + // { appendSlice: nil/false, keepMapValues: nil/false } + return ptr.To(ToFieldPathPolicyForceMergeObjects) +} + +func isNilOrFalse(b *bool) bool { + return b == nil || !*b +} + +func isTrue(b *bool) bool { + return b != nil && *b +} + func setMissingPatchSetFields(patchSet v1.PatchSet) v1.PatchSet { p := []v1.Patch{} for _, patch := range patchSet.Patches { @@ -215,7 +354,7 @@ func setMissingResourceFields(idx int, rs v1.ComposedTemplate) v1.ComposedTempla } // setTransformTypeRequiredFields sets fields that are required with -// function-patch-and-transform but were optional with the built-in engine +// 
function-patch-and-transform but were optional with the built-in engine. func setTransformTypeRequiredFields(tt v1.Transform) v1.Transform { if tt.Type == "" { if tt.Math != nil { diff --git a/cmd/crank/beta/convert/pipelinecomposition/converter_test.go b/cmd/crank/beta/convert/pipelinecomposition/converter_test.go index 37629f34a..f8406626a 100644 --- a/cmd/crank/beta/convert/pipelinecomposition/converter_test.go +++ b/cmd/crank/beta/convert/pipelinecomposition/converter_test.go @@ -26,6 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" commonv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" "github.com/crossplane/crossplane-runtime/pkg/test" @@ -105,7 +106,6 @@ func TestSetMissingConnectionDetailFields(t *testing.T) { if diff := cmp.Diff(tc.want.sk, sk); diff != "" { t.Errorf("%s\nsetMissingConnectionDetailFields(...): -want i, +got i:\n%s", tc.reason, diff) } - }) } } @@ -181,6 +181,12 @@ func TestConvertPnTToPipeline(t *testing.T) { }, }, }, + Policy: &v1.PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + MergeOptions: &commonv1.MergeOptions{ + KeepMapValues: ptr.To(true), + }, + }, }, { Type: v1.PatchTypeCombineFromComposite, @@ -224,6 +230,11 @@ func TestConvertPnTToPipeline(t *testing.T) { }, }, }, + Policy: &v1.PatchPolicy{ + MergeOptions: &commonv1.MergeOptions{ + AppendSlice: ptr.To(true), + }, + }, }, }, Policy: &commonv1.Policy{ @@ -262,42 +273,18 @@ func TestConvertPnTToPipeline(t *testing.T) { Object: map[string]any{ "apiVersion": string("pt.fn.crossplane.io/v1beta1"), "kind": string("Resources"), - "environment": &v1.EnvironmentConfiguration{ - Patches: []v1.EnvironmentPatch{ - { - Type: typeFromCompositeFieldPath, - FromFieldPath: &fieldPath, - ToFieldPath: &fieldPath, - }, + "environment": &Environment{ + Patches: []EnvironmentPatch{ { - Type: typeFromCompositeFieldPath, - FromFieldPath: 
&fieldPath, - ToFieldPath: &fieldPath, - Transforms: []v1.Transform{ - { - Type: v1.TransformTypeString, - String: &v1.StringTransform{ - Format: &stringFmt, - Type: v1.StringTransformTypeFormat, - }, - }, - { - Type: v1.TransformTypeMath, - Math: &v1.MathTransform{ - Multiply: &intp, - Type: v1.MathTransformTypeMultiply, - }, - }, + EnvironmentPatch: v1.EnvironmentPatch{ + Type: typeFromCompositeFieldPath, + FromFieldPath: &fieldPath, + ToFieldPath: &fieldPath, }, }, - }, - }, - "patchSets": []v1.PatchSet{ - { - Name: "test-patchset", - Patches: []v1.Patch{ - { - Type: v1.PatchTypeFromCompositeFieldPath, + { + EnvironmentPatch: v1.EnvironmentPatch{ + Type: typeFromCompositeFieldPath, FromFieldPath: &fieldPath, ToFieldPath: &fieldPath, Transforms: []v1.Transform{ @@ -317,15 +304,54 @@ func TestConvertPnTToPipeline(t *testing.T) { }, }, }, + Policy: &PatchPolicy{ + ToFieldPath: ptr.To(ToFieldPathPolicyForceMergeObjectsAppendArrays), + }, + }, + }, + }, + "patchSets": []PatchSet{ + { + Name: "test-patchset", + Patches: []Patch{ { - Type: v1.PatchTypeCombineFromComposite, - FromFieldPath: &fieldPath, - ToFieldPath: &fieldPath, + Patch: v1.Patch{ + Type: v1.PatchTypeFromCompositeFieldPath, + FromFieldPath: &fieldPath, + ToFieldPath: &fieldPath, + Transforms: []v1.Transform{ + { + Type: v1.TransformTypeString, + String: &v1.StringTransform{ + Format: &stringFmt, + Type: v1.StringTransformTypeFormat, + }, + }, + { + Type: v1.TransformTypeMath, + Math: &v1.MathTransform{ + Multiply: &intp, + Type: v1.MathTransformTypeMultiply, + }, + }, + }, + }, + Policy: &PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + ToFieldPath: ptr.To(ToFieldPathPolicyMergeObjects), + }, + }, + { + Patch: v1.Patch{ + Type: v1.PatchTypeCombineFromComposite, + FromFieldPath: &fieldPath, + ToFieldPath: &fieldPath, + }, }, }, }, }, - "resources": []v1.ComposedTemplate{}, + "resources": []ComposedTemplate{}, }, }, }, @@ -467,7 +493,6 @@ func TestSetTransformTypeRequiredFields(t 
*testing.T) { if diff := cmp.Diff(tc.want.tt, tt); diff != "" { t.Errorf("%s\nsetTransformTypeRequiredFields(...): -want i, +got i:\n%s", tc.reason, diff) } - }) } } @@ -495,9 +520,9 @@ func TestProcessFunctionInput(t *testing.T) { Object: map[string]any{ "apiVersion": "pt.fn.crossplane.io/v1beta1", "kind": "Resources", - "environment": (*v1.EnvironmentConfiguration)(nil), - "patchSets": []v1.PatchSet{}, - "resources": []v1.ComposedTemplate{}, + "environment": (*Environment)(nil), + "patchSets": []PatchSet{}, + "resources": []ComposedTemplate{}, }, }, }, @@ -552,49 +577,55 @@ func TestProcessFunctionInput(t *testing.T) { Object: map[string]any{ "apiVersion": "pt.fn.crossplane.io/v1beta1", "kind": "Resources", - "environment": &v1.EnvironmentConfiguration{ - Patches: []v1.EnvironmentPatch{ + "environment": &Environment{ + Patches: []EnvironmentPatch{ { - Type: typeFromCompositeFieldPath, - FromFieldPath: &fieldPath, - ToFieldPath: &fieldPath, + EnvironmentPatch: v1.EnvironmentPatch{ + Type: typeFromCompositeFieldPath, + FromFieldPath: &fieldPath, + ToFieldPath: &fieldPath, + }, }, }, }, - "patchSets": []v1.PatchSet{ + "patchSets": []PatchSet{ { Name: "test-patchset", - Patches: []v1.Patch{ + Patches: []Patch{ { - Type: v1.PatchTypeFromCompositeFieldPath, - FromFieldPath: &fieldPath, - ToFieldPath: &fieldPath, - Transforms: []v1.Transform{ - { - Type: v1.TransformTypeString, - String: &v1.StringTransform{ - Format: &stringFmt, - Type: v1.StringTransformTypeFormat, + Patch: v1.Patch{ + Type: v1.PatchTypeFromCompositeFieldPath, + FromFieldPath: &fieldPath, + ToFieldPath: &fieldPath, + Transforms: []v1.Transform{ + { + Type: v1.TransformTypeString, + String: &v1.StringTransform{ + Format: &stringFmt, + Type: v1.StringTransformTypeFormat, + }, }, - }, - { - Type: v1.TransformTypeMath, - Math: &v1.MathTransform{ - Multiply: &intp, - Type: v1.MathTransformTypeMultiply, + { + Type: v1.TransformTypeMath, + Math: &v1.MathTransform{ + Multiply: &intp, + Type: 
v1.MathTransformTypeMultiply, + }, }, }, }, }, { - Type: v1.PatchTypeCombineFromComposite, - FromFieldPath: &fieldPath, - ToFieldPath: &fieldPath, + Patch: v1.Patch{ + Type: v1.PatchTypeCombineFromComposite, + FromFieldPath: &fieldPath, + ToFieldPath: &fieldPath, + }, }, }, }, }, - "resources": []v1.ComposedTemplate{}, + "resources": []ComposedTemplate{}, }, }, }, @@ -692,7 +723,6 @@ func TestSetMissingPatchSetFields(t *testing.T) { if diff := cmp.Diff(tc.want, got); diff != "" { t.Errorf("%s\nsetMissingPatchSetFields(...): -want i, +got i:\n%s", tc.reason, diff) } - }) } } @@ -721,7 +751,8 @@ func TestSetMissingEnvironmentPatchFields(t *testing.T) { Type: v1.PatchTypeCombineFromComposite, FromFieldPath: &fieldPath, ToFieldPath: &fieldPath, - }}, + }, + }, "TransformArrayMissingFields": { reason: "Nested missing Types are filled in for a transform array", args: args{ @@ -776,7 +807,8 @@ func TestSetMissingEnvironmentPatchFields(t *testing.T) { Type: v1.PatchTypeFromCompositeFieldPath, FromFieldPath: &fieldPath, ToFieldPath: &fieldPath, - }}, + }, + }, } for name, tc := range cases { t.Run(name, func(t *testing.T) { @@ -812,7 +844,8 @@ func TestSetMissingPatchFields(t *testing.T) { Type: v1.PatchTypeCombineFromComposite, FromFieldPath: &fieldPath, ToFieldPath: &fieldPath, - }}, + }, + }, "TransformArrayMissingFields": { reason: "Nested missing Types are filled in for a transform array", args: args{ @@ -867,7 +900,8 @@ func TestSetMissingPatchFields(t *testing.T) { Type: v1.PatchTypeFromCompositeFieldPath, FromFieldPath: &fieldPath, ToFieldPath: &fieldPath, - }}, + }, + }, } for name, tc := range cases { t.Run(name, func(t *testing.T) { @@ -884,7 +918,7 @@ func TestSetMissingResourceFields(t *testing.T) { empty := "" str := "crossplane" fcsk := v1.ConnectionDetailTypeFromConnectionSecretKey - var baseNoName = map[string]any{ + baseNoName := map[string]any{ "apiVersion": "nop.crossplane.io/v1", "kind": "TestResource", "spec": map[string]any{}, @@ -980,3 +1014,394 @@ 
func TestSetMissingResourceFields(t *testing.T) { }) } } + +func TestMigratePatchPolicyInResources(t *testing.T) { + cases := map[string]struct { + reason string + args []v1.ComposedTemplate + want []ComposedTemplate + }{ + "ResourcesHasSimplePatches": { + reason: "Composed Resources has simple patches", + args: []v1.ComposedTemplate{ + { + Name: ptr.To("resource-0"), + Patches: []v1.Patch{ + { + Type: v1.PatchTypeToCompositeFieldPath, + FromFieldPath: ptr.To("envVal"), + ToFieldPath: ptr.To("spec.val"), + Policy: nil, + }, + { + Type: v1.PatchTypeToCompositeFieldPath, + FromFieldPath: ptr.To("envVal"), + ToFieldPath: ptr.To("spec.val"), + Policy: &v1.PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + MergeOptions: &commonv1.MergeOptions{ + KeepMapValues: ptr.To(true), + }, + }, + }, + }, + }, + }, + want: []ComposedTemplate{ + { + ComposedTemplate: v1.ComposedTemplate{ + Name: ptr.To("resource-0"), + Patches: nil, + }, + Patches: []Patch{ + { + Patch: v1.Patch{ + Type: v1.PatchTypeToCompositeFieldPath, + FromFieldPath: ptr.To("envVal"), + ToFieldPath: ptr.To("spec.val"), + Policy: nil, + }, + Policy: nil, + }, + { + Patch: v1.Patch{ + Type: v1.PatchTypeToCompositeFieldPath, + FromFieldPath: ptr.To("envVal"), + ToFieldPath: ptr.To("spec.val"), + Policy: nil, + }, + Policy: &PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + ToFieldPath: ptr.To(ToFieldPathPolicyMergeObjects), + }, + }, + }, + }, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + got := MigratePatchPolicyInResources(tc.args) + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("MigratePatchPolicyInResources() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func TestMigratePatchPolicyInPatchSets(t *testing.T) { + cases := map[string]struct { + reason string + args []v1.PatchSet + want []PatchSet + }{ + "PatchSetHasSimplePatches": { + reason: "PatchSet has simple patches", + args: []v1.PatchSet{ + { + Name: 
"patchset-0", + Patches: []v1.Patch{ + { + Type: v1.PatchTypeToCompositeFieldPath, + FromFieldPath: ptr.To("envVal"), + ToFieldPath: ptr.To("spec.val"), + Policy: nil, + }, + { + Type: v1.PatchTypeToCompositeFieldPath, + FromFieldPath: ptr.To("envVal"), + ToFieldPath: ptr.To("spec.val"), + Policy: &v1.PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + MergeOptions: &commonv1.MergeOptions{ + KeepMapValues: ptr.To(true), + }, + }, + }, + }, + }, + }, + want: []PatchSet{ + { + Name: "patchset-0", + Patches: []Patch{ + { + Patch: v1.Patch{ + Type: v1.PatchTypeToCompositeFieldPath, + FromFieldPath: ptr.To("envVal"), + ToFieldPath: ptr.To("spec.val"), + Policy: nil, + }, + Policy: nil, + }, + { + Patch: v1.Patch{ + Type: v1.PatchTypeToCompositeFieldPath, + FromFieldPath: ptr.To("envVal"), + ToFieldPath: ptr.To("spec.val"), + Policy: nil, + }, + Policy: &PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + ToFieldPath: ptr.To(ToFieldPathPolicyMergeObjects), + }, + }, + }, + }, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + got := MigratePatchPolicyInPatchSets(tc.args) + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("MigratePatchPolicyInPatchSets() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func TestMigratePatchPolicyInEnvironment(t *testing.T) { + cases := map[string]struct { + reason string + args *v1.EnvironmentConfiguration + want *Environment + }{ + "EnvironmentNil": { + reason: "Environment is nil", + args: nil, + want: nil, + }, + "EnvironmentHasNoPatches": { + reason: "Environment has no patches", + args: &v1.EnvironmentConfiguration{Patches: []v1.EnvironmentPatch{}}, + want: nil, + }, + "EnvironmentHasSimplePatches": { + reason: "Environment has simple patches", + args: &v1.EnvironmentConfiguration{ + Patches: []v1.EnvironmentPatch{ + { + Type: v1.PatchTypeToCompositeFieldPath, + FromFieldPath: ptr.To("envVal"), + ToFieldPath: ptr.To("spec.val"), + Policy: nil, + 
}, + { + Type: v1.PatchTypeToCompositeFieldPath, + FromFieldPath: ptr.To("envVal"), + ToFieldPath: ptr.To("spec.val"), + Policy: &v1.PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + MergeOptions: &commonv1.MergeOptions{ + KeepMapValues: ptr.To(true), + }, + }, + }, + }, + }, + want: &Environment{ + Patches: []EnvironmentPatch{ + { + EnvironmentPatch: v1.EnvironmentPatch{ + Type: v1.PatchTypeToCompositeFieldPath, + FromFieldPath: ptr.To("envVal"), + ToFieldPath: ptr.To("spec.val"), + Policy: nil, + }, + Policy: nil, + }, + { + EnvironmentPatch: v1.EnvironmentPatch{ + Type: v1.PatchTypeToCompositeFieldPath, + FromFieldPath: ptr.To("envVal"), + ToFieldPath: ptr.To("spec.val"), + Policy: nil, + }, + Policy: &PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + ToFieldPath: ptr.To(ToFieldPathPolicyMergeObjects), + }, + }, + }, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + got := MigratePatchPolicyInEnvironment(tc.args) + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("MigratePatchPolicyInEnvironment() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +/* +# MergeOptions appendSlice keepMapValues policy.toFieldPath +1 nil N/A n/A nil (defaults to Replace) +2 non-nil nil or false true MergeObjects +3 non-nil true nil or false ForceMergeObjectsAppendArrays +4 non-nil nil or false nil or false ForceMergeObjects +5 non-nil true True MergeObjectsAppendArrays +*/ + +func TestPatchPolicy(t *testing.T) { + cases := map[string]struct { + reason string + args *v1.PatchPolicy + want *PatchPolicy + }{ + "PatchPolicyWithNilMergeOptionsAndFromFieldPath": { // case 1 + reason: "MergeOptions and FromFieldPath are nil", + args: &v1.PatchPolicy{ + FromFieldPath: nil, + MergeOptions: nil, + }, + want: nil, + }, + "PatchPolicyWithNilMergeOptions": { // case 1 + reason: "MergeOptions is nil", + args: &v1.PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + MergeOptions: nil, + 
}, + want: &PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + ToFieldPath: nil, + }, + }, + "PatchPolicyWithKeepMapValuesTrueAppendSliceNil": { + reason: "AppendSlice is nil && KeepMapValues is true", // case 2 + args: &v1.PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + MergeOptions: &commonv1.MergeOptions{ + KeepMapValues: ptr.To(true), + }, + }, + want: &PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + ToFieldPath: ptr.To(ToFieldPathPolicyMergeObjects), + }, + }, + "PatchPolicyWithKeepMapValuesTrueAppendSliceFalse": { + reason: "AppendSlice is false && KeepMapValues is true", // case 2 + args: &v1.PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + MergeOptions: &commonv1.MergeOptions{ + KeepMapValues: ptr.To(true), + AppendSlice: ptr.To(false), + }, + }, + want: &PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + ToFieldPath: ptr.To(ToFieldPathPolicyMergeObjects), + }, + }, + "PatchPolicyWithTrueAppendSliceInMergeOptions": { // case 3 + reason: "AppendSlice is true && KeepMapValues is nil", + args: &v1.PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + MergeOptions: &commonv1.MergeOptions{ + AppendSlice: ptr.To(true), + }, + }, + want: &PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + ToFieldPath: ptr.To(ToFieldPathPolicyForceMergeObjectsAppendArrays), + }, + }, + "PatchPolicyWithTrueAppendSliceFalseKeepMapValuesInMergeOptions": { // case 3 + reason: "AppendSlice is true && KeepMapValues is false", + args: &v1.PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + MergeOptions: &commonv1.MergeOptions{ + AppendSlice: ptr.To(true), + KeepMapValues: ptr.To(false), + }, + }, + want: &PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + ToFieldPath: ptr.To(ToFieldPathPolicyForceMergeObjectsAppendArrays), + }, + }, + "PatchPolicyWithEmptyMergeOptions": { // case 4 + reason: "Both 
AppendSlice and KeepMapValues are nil", + args: &v1.PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + MergeOptions: &commonv1.MergeOptions{}, + }, + want: &PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + ToFieldPath: ptr.To(ToFieldPathPolicyForceMergeObjects), + }, + }, + "PatchPolicyWithNilKeepMapValuesInMergeOptions": { // case 4 + reason: "AppendSlice is false and KeepMapValues is nil", + args: &v1.PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + MergeOptions: &commonv1.MergeOptions{ + AppendSlice: ptr.To(false), + }, + }, + want: &PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + ToFieldPath: ptr.To(ToFieldPathPolicyForceMergeObjects), + }, + }, + "PatchPolicyWithNilAppendSliceInMergeOptions": { // case 4 + reason: "AppendSlice is nil and KeepMapValues is false", + args: &v1.PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + MergeOptions: &commonv1.MergeOptions{ + KeepMapValues: ptr.To(false), + }, + }, + want: &PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyOptional), + ToFieldPath: ptr.To(ToFieldPathPolicyForceMergeObjects), + }, + }, + "PatchPolicyWithBothKeepMapValuesAndAppendSliceFalse": { // case 4 + reason: "Both KeepMapValues and AppendSlice is false", + args: &v1.PatchPolicy{ + FromFieldPath: nil, + MergeOptions: &commonv1.MergeOptions{ + KeepMapValues: ptr.To(false), + AppendSlice: ptr.To(false), + }, + }, + want: &PatchPolicy{ + FromFieldPath: nil, + ToFieldPath: ptr.To(ToFieldPathPolicyForceMergeObjects), + }, + }, + "PatchPolicyWithKeepMapValuesTrueAppendSliceTrue": { // case 5 + reason: "Both KeepMapValues and AppendSlice is true", + args: &v1.PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyRequired), + MergeOptions: &commonv1.MergeOptions{ + KeepMapValues: ptr.To(true), + AppendSlice: ptr.To(true), + }, + }, + want: &PatchPolicy{ + FromFieldPath: ptr.To(v1.FromFieldPathPolicyRequired), + ToFieldPath: 
ptr.To(ToFieldPathPolicyMergeObjectsAppendArrays), + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + got := migratePatchPolicy(tc.args) + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("%s\npatchPolicy(...): -want i, +got i:\n%s", tc.reason, diff) + } + }) + } +} diff --git a/cmd/crank/beta/convert/pipelinecomposition/types.go b/cmd/crank/beta/convert/pipelinecomposition/types.go index 001639141..1ae9bb2ae 100644 --- a/cmd/crank/beta/convert/pipelinecomposition/types.go +++ b/cmd/crank/beta/convert/pipelinecomposition/types.go @@ -18,9 +18,13 @@ package pipelinecomposition import v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" -// Input represents the input to the patch-and-transform function -// This struct is copied from function patch and transform, as we can't import it directly +// Input represents the input to the patch-and-transform function. This struct +// originates from function patch and transform, as we can't import it directly // https://github.com/crossplane-contrib/function-patch-and-transform/blob/main/input/v1beta1/resources.go +// Note that it does not exactly match the target type with full fidelity. +// This type is used during the processing and conversion of the given input, +// but the final converted output is written in an unstructured manner without a +// static type definition for more flexibility. type Input struct { // PatchSets define a named set of patches that may be included by any // resource in this Composition. PatchSets cannot themselves refer to other @@ -47,3 +51,55 @@ type Input struct { // +optional Resources []v1.ComposedTemplate `json:"resources,omitempty"` } + +// PatchSet wrapper around v1.PatchSet with custom Patch. +type PatchSet struct { + // Name of this PatchSet. + Name string `json:"name"` + + Patches []Patch `json:"patches"` +} + +// ComposedTemplate wrapper around v1.ComposedTemplate with custom Patch. 
+type ComposedTemplate struct { + v1.ComposedTemplate + + Patches []Patch `json:"patches,omitempty"` +} + +// Patch wrapper around v1.Patch with custom PatchPolicy. +type Patch struct { + v1.Patch + + Policy *PatchPolicy `json:"policy,omitempty"` +} + +// Environment represents the Composition environment. +type Environment struct { + Patches []EnvironmentPatch `json:"patches,omitempty"` +} + +// EnvironmentPatch wrapper around v1.EnvironmentPatch with custom PatchPolicy. +type EnvironmentPatch struct { + v1.EnvironmentPatch + + Policy *PatchPolicy `json:"policy,omitempty"` +} + +// A ToFieldPathPolicy determines how to patch to a field path. +type ToFieldPathPolicy string + +// ToFieldPathPatchPolicy defines the policy for the ToFieldPath in a Patch. +const ( + ToFieldPathPolicyReplace ToFieldPathPolicy = "Replace" + ToFieldPathPolicyMergeObjects ToFieldPathPolicy = "MergeObjects" + ToFieldPathPolicyMergeObjectsAppendArrays ToFieldPathPolicy = "MergeObjectsAppendArrays" + ToFieldPathPolicyForceMergeObjects ToFieldPathPolicy = "ForceMergeObjects" + ToFieldPathPolicyForceMergeObjectsAppendArrays ToFieldPathPolicy = "ForceMergeObjectsAppendArrays" +) + +// PatchPolicy defines the policy for a patch. +type PatchPolicy struct { + FromFieldPath *v1.FromFieldPathPolicy `json:"fromFieldPath,omitempty"` + ToFieldPath *ToFieldPathPolicy `json:"toFieldPath,omitempty"` +} diff --git a/cmd/crank/beta/top/top.go b/cmd/crank/beta/top/top.go index 12ff6dc3e..47c375df5 100644 --- a/cmd/crank/beta/top/top.go +++ b/cmd/crank/beta/top/top.go @@ -51,8 +51,8 @@ const ( // Cmd represents the top command. type Cmd struct { - Summary bool `short:"s" name:"summary" help:"Adds summary header for all Crossplane pods."` - Namespace string `short:"n" name:"namespace" help:"Show pods from a specific namespace, defaults to crossplane-system." default:"crossplane-system"` + Summary bool `help:"Adds summary header for all Crossplane pods." 
name:"summary" short:"s"` + Namespace string `default:"crossplane-system" help:"Show pods from a specific namespace, defaults to crossplane-system." name:"namespace" short:"n"` } // Help returns help instructions for the top command. @@ -101,7 +101,7 @@ func (r *defaultPrinterRow) String() string { } // Run runs the top command. -func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { //nolint:gocyclo // TODO:(piotr1215) refactor to use dedicated functions +func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { logger = logger.WithValues("cmd", "top") logger.Debug("Tabwriter header created") @@ -130,7 +130,6 @@ func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { //nolint:gocyc ctx := context.Background() pods, err := k8sClientset.CoreV1().Pods(c.Namespace).List(ctx, metav1.ListOptions{}) - if err != nil { return errors.Wrap(err, errFetchAllPods) } @@ -139,7 +138,7 @@ func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { //nolint:gocyc logger.Debug("Fetched all Crossplane pods", "pods", crossplanePods, "namespace", c.Namespace) if len(crossplanePods) == 0 { - fmt.Println("No Crossplane pods found in the namespace", c.Namespace) + _, _ = fmt.Fprintln(k.Stdout, "No Crossplane pods found in the namespace", c.Namespace) return nil } @@ -158,9 +157,6 @@ func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { //nolint:gocyc } } - if err != nil { - return errors.Wrap(err, errGetPodMetrics) - } logger.Debug("Added metrics to Crossplane pods") sort.Slice(crossplanePods, func(i, j int) bool { @@ -173,7 +169,7 @@ func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { //nolint:gocyc if c.Summary { printPodsSummary(k.Stdout, crossplanePods) logger.Debug("Printed pods summary") - fmt.Println() + _, _ = fmt.Fprintln(k.Stdout) } if err := printPodsTable(k.Stdout, crossplanePods); err != nil { @@ -231,7 +227,7 @@ func printPodsSummary(w io.Writer, pods []topMetrics) { } // Print summary directly to the provided 
writer - fmt.Fprintf(w, "Nr of Crossplane pods: %d\n", len(pods)) + _, _ = fmt.Fprintf(w, "Nr of Crossplane pods: %d\n", len(pods)) // Sort categories alphabetically to ensure consistent output categories := make([]string, 0, len(categoryCounts)) for category := range categoryCounts { @@ -239,10 +235,10 @@ func printPodsSummary(w io.Writer, pods []topMetrics) { } sort.Strings(categories) for _, category := range categories { - fmt.Fprintf(w, "%s: %d\n", capitalizeFirst(category), categoryCounts[category]) + _, _ = fmt.Fprintf(w, "%s: %d\n", capitalizeFirst(category), categoryCounts[category]) } - fmt.Fprintf(w, "Memory: %s\n", fmt.Sprintf("%vMi", totalMemoryUsage.Value()/(1024*1024))) - fmt.Fprintf(w, "CPU(cores): %s\n", fmt.Sprintf("%vm", totalCPUUsage.MilliValue())) + _, _ = fmt.Fprintf(w, "Memory: %s\n", fmt.Sprintf("%vMi", totalMemoryUsage.Value()/(1024*1024))) + _, _ = fmt.Fprintf(w, "CPU(cores): %s\n", fmt.Sprintf("%vm", totalCPUUsage.MilliValue())) } func getCrossplanePods(pods []v1.Pod) []topMetrics { diff --git a/cmd/crank/beta/top/top_test.go b/cmd/crank/beta/top/top_test.go index 368668a2d..77072f664 100644 --- a/cmd/crank/beta/top/top_test.go +++ b/cmd/crank/beta/top/top_test.go @@ -94,7 +94,6 @@ func TestGetCrossplanePods(t *testing.T) { }, { ObjectMeta: metav1.ObjectMeta{ - Name: "provider-azure-storage", Namespace: "crossplane-system", Labels: map[string]string{ @@ -154,6 +153,7 @@ func TestGetCrossplanePods(t *testing.T) { }) } } + func TestPrintPodsTable(t *testing.T) { type want struct { results string @@ -236,6 +236,7 @@ function crossplane-system function-123 200m 1024Mi }) } } + func TestPrintPodsSummary(t *testing.T) { type want struct { results string @@ -297,7 +298,6 @@ CPU(cores): 900000m } }) } - } func TestCapitalizeFirst(t *testing.T) { diff --git a/cmd/crank/beta/trace/internal/printer/default.go b/cmd/crank/beta/trace/internal/printer/default.go index 7cd75eaee..fe16c8197 100644 --- a/cmd/crank/beta/trace/internal/printer/default.go 
+++ b/cmd/crank/beta/trace/internal/printer/default.go @@ -42,7 +42,7 @@ const ( errFlushTabWriter = "cannot flush tab writer" ) -// DefaultPrinter defines the DefaultPrinter configuration +// DefaultPrinter defines the DefaultPrinter configuration. type DefaultPrinter struct { wide bool } @@ -129,7 +129,6 @@ func getHeaders(gk schema.GroupKind, wide bool) (headers fmt.Stringer, isPackage ready: "READY", status: "STATUS", }, false - } // Print implements the Printer interface by prints the resource tree in a @@ -212,11 +211,14 @@ func (p *DefaultPrinter) Print(w io.Writer, root *resource.Resource) error { // getResourceStatus returns a string that represents an entire row of status // information for the resource. -func getResourceStatus(r *resource.Resource, name string, wide bool) fmt.Stringer { //nolint:gocyclo // NOTE(phisco): just a few switches, not much to do here +func getResourceStatus(r *resource.Resource, name string, wide bool) fmt.Stringer { readyCond := r.GetCondition(xpv1.TypeReady) syncedCond := r.GetCondition(xpv1.TypeSynced) var status, m string switch { + case r.Unstructured.GetDeletionTimestamp() != nil: + // Report the status as deleted if the resource is being deleted + status = "Deleting" case r.Error != nil: // if there is an error we want to show it status = "Error" @@ -263,7 +265,7 @@ func getResourceStatus(r *resource.Resource, name string, wide bool) fmt.Stringe } } -func getPkgResourceStatus(r *resource.Resource, name string, wide bool) fmt.Stringer { //nolint:gocyclo // TODO: just a few switches, not much to do here +func getPkgResourceStatus(r *resource.Resource, name string, wide bool) fmt.Stringer { var err error var packageImg, state, status, m string diff --git a/cmd/crank/beta/trace/internal/printer/default_test.go b/cmd/crank/beta/trace/internal/printer/default_test.go index 3ea26e686..1dc93b1b2 100644 --- a/cmd/crank/beta/trace/internal/printer/default_test.go +++ b/cmd/crank/beta/trace/internal/printer/default_test.go @@ -53,6 
+53,7 @@ func TestDefaultPrinter(t *testing.T) { }, want: want{ // Note: Use spaces instead of tabs for indentation + //nolint:dupword // False positive for 'True True' output: ` NAME SYNCED READY STATUS ObjectStorage/test-resource (default) True True @@ -75,6 +76,7 @@ ObjectStorage/test-resource (default) True True }, want: want{ // Note: Use spaces instead of tabs for indentation + //nolint:dupword // False positive for 'True True' output: ` NAME RESOURCE SYNCED READY STATUS ObjectStorage/test-resource (default) True True @@ -96,15 +98,16 @@ ObjectStorage/test-resource (default) True True }, want: want{ // Note: Use spaces instead of tabs for indentation + //nolint:dupword // False positive for 'True True' output: ` -NAME VERSION INSTALLED HEALTHY STATE STATUS -Configuration/platform-ref-aws v0.9.0 True True - HealthyPackageRevision -├─ ConfigurationRevision/platform-ref-aws-9ad7b5db2899 v0.9.0 True True Active HealthyPackageRevision -└─ Configuration/upbound-configuration-aws-network upbound-configuration-aws-network v0.7.0 True True - HealthyPackageRevision - ├─ ConfigurationRevision/upbound-configuration-aws-network-97be9100cfe1 v0.7.0 True True Active HealthyPackageRevision - └─ Provider/upbound-provider-aws-ec2 v0.47.0 True Unknown - UnknownPackageRevisionHealth: ...der-helm xpkg.upbound.io/crossplane-contrib/provider-kubernetes] - ├─ ProviderRevision/upbound-provider-aws-ec2-9ad7b5db2899 v0.47.0 True False Active UnhealthyPackageRevision: ...ider package deployment has no condition of type "Available" yet - └─ Provider/upbound-provider-aws-something v0.47.0 True - - ActivePackageRevision +NAME VERSION INSTALLED HEALTHY STATE STATUS +Configuration/platform-ref-aws v0.9.0 True True - HealthyPackageRevision +├─ ConfigurationRevision/platform-ref-aws-9ad7b5db2899 v0.9.0 True True Active HealthyPackageRevision +└─ Configuration/upbound-configuration-aws-network v0.7.0 True True - HealthyPackageRevision + ├─ 
ConfigurationRevision/upbound-configuration-aws-network-97be9100cfe1 v0.7.0 True True Active HealthyPackageRevision + └─ Provider/upbound-provider-aws-ec2 v0.47.0 True Unknown - UnknownPackageRevisionHealth: ...der-helm xpkg.upbound.io/crossplane-contrib/provider-kubernetes] + ├─ ProviderRevision/upbound-provider-aws-ec2-9ad7b5db2899 v0.47.0 True False Active UnhealthyPackageRevision: ...ider package deployment has no condition of type "Available" yet + └─ Provider/upbound-provider-aws-something v0.47.0 True - - ActivePackageRevision `, err: nil, }, @@ -117,15 +120,16 @@ Configuration/platform-ref-aws }, want: want{ // Note: Use spaces instead of tabs for indentation + //nolint:dupword // False positive for 'True True' output: ` -NAME PACKAGE VERSION INSTALLED HEALTHY STATE STATUS -Configuration/platform-ref-aws xpkg.upbound.io/upbound/platform-ref-aws v0.9.0 True True - HealthyPackageRevision -├─ ConfigurationRevision/platform-ref-aws-9ad7b5db2899 xpkg.upbound.io/upbound/platform-ref-aws v0.9.0 True True Active HealthyPackageRevision -└─ Configuration/upbound-configuration-aws-network upbound-configuration-aws-network xpkg.upbound.io/upbound/configuration-aws-network v0.7.0 True True - HealthyPackageRevision - ├─ ConfigurationRevision/upbound-configuration-aws-network-97be9100cfe1 xpkg.upbound.io/upbound/configuration-aws-network v0.7.0 True True Active HealthyPackageRevision - └─ Provider/upbound-provider-aws-ec2 xpkg.upbound.io/upbound/provider-aws-ec2 v0.47.0 True Unknown - UnknownPackageRevisionHealth: cannot resolve package dependencies: incompatible dependencies: [xpkg.upbound.io/crossplane-contrib/provider-helm xpkg.upbound.io/crossplane-contrib/provider-kubernetes] - ├─ ProviderRevision/upbound-provider-aws-ec2-9ad7b5db2899 xpkg.upbound.io/upbound/provider-aws-ec2 v0.47.0 True False Active UnhealthyPackageRevision: post establish runtime hook failed for package: provider package deployment has no condition of type "Available" yet - └─ 
Provider/upbound-provider-aws-something xpkg.upbound.io/upbound/provider-aws-something v0.47.0 True - - ActivePackageRevision +NAME PACKAGE VERSION INSTALLED HEALTHY STATE STATUS +Configuration/platform-ref-aws xpkg.upbound.io/upbound/platform-ref-aws v0.9.0 True True - HealthyPackageRevision +├─ ConfigurationRevision/platform-ref-aws-9ad7b5db2899 xpkg.upbound.io/upbound/platform-ref-aws v0.9.0 True True Active HealthyPackageRevision +└─ Configuration/upbound-configuration-aws-network xpkg.upbound.io/upbound/configuration-aws-network v0.7.0 True True - HealthyPackageRevision + ├─ ConfigurationRevision/upbound-configuration-aws-network-97be9100cfe1 xpkg.upbound.io/upbound/configuration-aws-network v0.7.0 True True Active HealthyPackageRevision + └─ Provider/upbound-provider-aws-ec2 xpkg.upbound.io/upbound/provider-aws-ec2 v0.47.0 True Unknown - UnknownPackageRevisionHealth: cannot resolve package dependencies: incompatible dependencies: [xpkg.upbound.io/crossplane-contrib/provider-helm xpkg.upbound.io/crossplane-contrib/provider-kubernetes] + ├─ ProviderRevision/upbound-provider-aws-ec2-9ad7b5db2899 xpkg.upbound.io/upbound/provider-aws-ec2 v0.47.0 True False Active UnhealthyPackageRevision: post establish runtime hook failed for package: provider package deployment has no condition of type "Available" yet + └─ Provider/upbound-provider-aws-something xpkg.upbound.io/upbound/provider-aws-something v0.47.0 True - - ActivePackageRevision `, err: nil, }, @@ -151,5 +155,4 @@ Configuration/platform-ref-aws } }) } - } diff --git a/cmd/crank/beta/trace/internal/printer/dot.go b/cmd/crank/beta/trace/internal/printer/dot.go index a68db1b58..ba27ce1d2 100644 --- a/cmd/crank/beta/trace/internal/printer/dot.go +++ b/cmd/crank/beta/trace/internal/printer/dot.go @@ -16,9 +16,8 @@ import ( "github.com/crossplane/crossplane/cmd/crank/beta/trace/internal/resource/xpkg" ) -// DotPrinter defines the DotPrinter configuration -type DotPrinter struct { -} +// DotPrinter defines the 
DotPrinter configuration. +type DotPrinter struct{} var _ Printer = &DotPrinter{} diff --git a/cmd/crank/beta/trace/internal/printer/dot_test.go b/cmd/crank/beta/trace/internal/printer/dot_test.go index 8743707b9..1fe7fb127 100644 --- a/cmd/crank/beta/trace/internal/printer/dot_test.go +++ b/cmd/crank/beta/trace/internal/printer/dot_test.go @@ -11,7 +11,7 @@ import ( "github.com/crossplane/crossplane/cmd/crank/beta/trace/internal/resource" ) -// Define a test for PrintDotGraph +// Define a test for PrintDotGraph. func TestPrintDotGraph(t *testing.T) { type args struct { resource *resource.Resource @@ -67,7 +67,7 @@ func TestPrintDotGraph(t *testing.T) { n1[label="Name: platform-ref-aws\nApiVersion: pkg.crossplane.io/v1\nPackage: xpkg.upbound.io/upbound/platform-ref-aws:v0.9.0\nInstalled: True\nHealthy: True\n",penwidth="2"]; n2[label="Name: platform-ref-aws-9ad7b5db2899\nApiVersion: pkg.crossplane.io/v1\nPackage: xpkg.upbound.io/upbound/platform-ref-aws:v0.9.0\nHealthy: True\nState: HealthyPackageRevision\n",penwidth="2"]; - n3[label="Name: upbound-configuration-aws-network upbound-configuration-aws-network\nApiVersion: pkg.crossplane.io/v1\nPackage: xpkg.upbound.io/upbound/configuration-aws-network:v0.7.0\nInstalled: True\nHealthy: True\n",penwidth="2"]; + n3[label="Name: upbound-configuration-aws-network\nApiVersion: pkg.crossplane.io/v1\nPackage: xpkg.upbound.io/upbound/configuration-aws-network:v0.7.0\nInstalled: True\nHealthy: True\n",penwidth="2"]; n4[label="Name: upbound-configuration-aws-network-97be9100cfe1\nApiVersion: pkg.crossplane.io/v1\nPackage: xpkg.upbound.io/upbound/configuration-aws-network:v0.7.0\nHealthy: True\nState: HealthyPackageRevision\n",penwidth="2"]; n5[label="Name: upbound-provider-aws-ec2\nApiVersion: pkg.crossplane.io/v1\nPackage: xpkg.upbound.io/upbound/provider-aws-ec2:v0.47.0\nInstalled: True\nHealthy: Unknown\n",penwidth="2"]; n6[label="Name: upbound-provider-aws-ec2-9ad7b5db2899\nApiVersion: pkg.crossplane.io/v1\nPackage: 
xpkg.upbound.io/upbound/provider-aws-ec2:v0.47.0\nHealthy: False\nState: UnhealthyPackageRevision\n",penwidth="2"]; @@ -103,8 +103,6 @@ func TestPrintDotGraph(t *testing.T) { if diff := cmp.Diff(tc.want.dotString, got); diff != "" { t.Errorf("%s\nDotPrinter.Print(): -want, +got:\n%s", tc.reason, diff) } - }) - } } diff --git a/cmd/crank/beta/trace/internal/printer/json.go b/cmd/crank/beta/trace/internal/printer/json.go index 7089816fa..30cbabea9 100644 --- a/cmd/crank/beta/trace/internal/printer/json.go +++ b/cmd/crank/beta/trace/internal/printer/json.go @@ -31,8 +31,7 @@ const ( ) // JSONPrinter is a printer that prints the resource graph as JSON. -type JSONPrinter struct { -} +type JSONPrinter struct{} var _ Printer = &JSONPrinter{} diff --git a/cmd/crank/beta/trace/internal/printer/json_test.go b/cmd/crank/beta/trace/internal/printer/json_test.go index 6a6f1d9fb..c4e52ff4a 100644 --- a/cmd/crank/beta/trace/internal/printer/json_test.go +++ b/cmd/crank/beta/trace/internal/printer/json_test.go @@ -305,5 +305,4 @@ func TestJSONPrinter(t *testing.T) { } }) } - } diff --git a/cmd/crank/beta/trace/internal/printer/printer.go b/cmd/crank/beta/trace/internal/printer/printer.go index 597bc32bc..9fbd68c60 100644 --- a/cmd/crank/beta/trace/internal/printer/printer.go +++ b/cmd/crank/beta/trace/internal/printer/printer.go @@ -33,7 +33,7 @@ const ( // Type represents the type of printer. type Type string -// Implemented PrinterTypes +// Implemented PrinterTypes. const ( TypeDefault Type = "default" TypeWide Type = "wide" @@ -43,7 +43,7 @@ const ( // Printer implements the interface which is used by all printers in this package. type Printer interface { - Print(io.Writer, *resource.Resource) error + Print(w io.Writer, r *resource.Resource) error } // New creates a new printer based on the specified type. 
diff --git a/cmd/crank/beta/trace/internal/printer/printer_test.go b/cmd/crank/beta/trace/internal/printer/printer_test.go index 2e93a8384..e95b0f747 100644 --- a/cmd/crank/beta/trace/internal/printer/printer_test.go +++ b/cmd/crank/beta/trace/internal/printer/printer_test.go @@ -219,7 +219,7 @@ func GetComplexPackage() *resource.Resource { WithDesiredState(v1.PackageRevisionActive)), }, { - Unstructured: DummyPackage(v1.ConfigurationGroupVersionKind, "upbound-configuration-aws-network upbound-configuration-aws-network", + Unstructured: DummyPackage(v1.ConfigurationGroupVersionKind, "upbound-configuration-aws-network", WithConditions(v1.Active(), v1.Healthy()), WithPackage("xpkg.upbound.io/upbound/configuration-aws-network:v0.7.0")), Children: []*resource.Resource{ diff --git a/cmd/crank/beta/trace/internal/resource/client.go b/cmd/crank/beta/trace/internal/resource/client.go index e5a910561..3e292023e 100644 --- a/cmd/crank/beta/trace/internal/resource/client.go +++ b/cmd/crank/beta/trace/internal/resource/client.go @@ -85,7 +85,6 @@ func GetResource(ctx context.Context, client client.Client, ref *v1.ObjectRefere result.SetGroupVersionKind(ref.GroupVersionKind()) err := client.Get(ctx, xpmeta.NamespacedNameOf(ref), &result) - if err != nil { // If the resource is not found, we still want to return a Resource // object with the name and namespace set, so that the caller can diff --git a/cmd/crank/beta/trace/internal/resource/xpkg/client.go b/cmd/crank/beta/trace/internal/resource/xpkg/client.go index 05df3968c..2ef6d1984 100644 --- a/cmd/crank/beta/trace/internal/resource/xpkg/client.go +++ b/cmd/crank/beta/trace/internal/resource/xpkg/client.go @@ -32,8 +32,8 @@ import ( xpunstructured "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured" pkgv1 "github.com/crossplane/crossplane/apis/pkg/v1" - "github.com/crossplane/crossplane/apis/pkg/v1alpha1" - "github.com/crossplane/crossplane/apis/pkg/v1beta1" + pkgv1alpha1 
"github.com/crossplane/crossplane/apis/pkg/v1alpha1" + pkgv1beta1 "github.com/crossplane/crossplane/apis/pkg/v1beta1" "github.com/crossplane/crossplane/cmd/crank/beta/trace/internal/resource" "github.com/crossplane/crossplane/internal/xpkg" ) @@ -95,7 +95,7 @@ func (kc *Client) GetResourceTree(ctx context.Context, root *resource.Resource) } // the root is a package type, get the lock file now - lock := &v1beta1.Lock{} + lock := &pkgv1beta1.Lock{} if err := kc.client.Get(ctx, types.NamespacedName{Name: "lock"}, lock); err != nil { return nil, err } @@ -156,7 +156,7 @@ func (kc *Client) setPackageRuntimeConfigChild(ctx context.Context, res *resourc } // We try loading both as currently both are supported and if both are present they are merged. controllerConfigRef := pkgv1.ControllerConfigReference{} - apiVersion, kind := v1alpha1.ControllerConfigGroupVersionKind.ToAPIVersionAndKind() + apiVersion, kind := pkgv1alpha1.ControllerConfigGroupVersionKind.ToAPIVersionAndKind() if err := fieldpath.Pave(res.Unstructured.Object).GetValueInto("spec.controllerConfigRef", &runtimeConfigRef); err == nil { res.Children = append(res.Children, resource.GetResource(ctx, kc.client, &v1.ObjectReference{ APIVersion: apiVersion, @@ -199,8 +199,8 @@ func (kc *Client) getRevisions(ctx context.Context, xpkg *resource.Resource) ([] revisions.SetGroupVersionKind(pkgv1.ProviderRevisionGroupVersionKind) case pkgv1.ConfigurationGroupVersionKind.GroupKind(): revisions.SetGroupVersionKind(pkgv1.ConfigurationRevisionGroupVersionKind) - case v1beta1.FunctionGroupVersionKind.GroupKind(): - revisions.SetGroupVersionKind(v1beta1.FunctionRevisionGroupVersionKind) + case pkgv1.FunctionGroupVersionKind.GroupKind(): + revisions.SetGroupVersionKind(pkgv1.FunctionRevisionGroupVersionKind) default: // If we didn't match any of the know types, we try to guess revisions.SetGroupVersionKind(gvk.GroupVersion().WithKind(gvk.Kind + "RevisionList")) @@ -221,14 +221,14 @@ func (kc *Client) getRevisions(ctx 
context.Context, xpkg *resource.Resource) ([] } // getPackageDetails returns the package details for the given package type. -func getPackageDetails(t v1beta1.PackageType) (string, string, pkgv1.PackageRevision, error) { +func getPackageDetails(t pkgv1beta1.PackageType) (string, string, pkgv1.PackageRevision, error) { switch t { - case v1beta1.ProviderPackageType: + case pkgv1beta1.ProviderPackageType: return pkgv1.ProviderKind, pkgv1.ProviderGroupVersionKind.GroupVersion().String(), &pkgv1.ProviderRevision{}, nil - case v1beta1.ConfigurationPackageType: + case pkgv1beta1.ConfigurationPackageType: return pkgv1.ConfigurationKind, pkgv1.ConfigurationGroupVersionKind.GroupVersion().String(), &pkgv1.ConfigurationRevision{}, nil - case v1beta1.FunctionPackageType: - return v1beta1.FunctionKind, v1beta1.FunctionGroupVersionKind.GroupVersion().String(), &v1beta1.FunctionRevision{}, nil + case pkgv1beta1.FunctionPackageType: + return pkgv1.FunctionKind, pkgv1.FunctionGroupVersionKind.GroupVersion().String(), &pkgv1.FunctionRevision{}, nil default: return "", "", nil, errors.Errorf("unknown package dependency type %s", t) } @@ -236,7 +236,7 @@ func getPackageDetails(t v1beta1.PackageType) (string, string, pkgv1.PackageRevi // getDependencyRef returns the dependency reference for the given package, // based on the lock file. -func (kc *Client) getDependencyRef(ctx context.Context, lock *v1beta1.Lock, pkgType v1beta1.PackageType, pkg string) (*v1.ObjectReference, error) { +func (kc *Client) getDependencyRef(ctx context.Context, lock *pkgv1beta1.Lock, pkgType pkgv1beta1.PackageType, pkg string) (*v1.ObjectReference, error) { // if we don't find a package to match the current dependency, which // can happen during initial installation when dependencies are // being discovered and fetched. 
We'd still like to show something @@ -285,7 +285,7 @@ func (kc *Client) getDependencyRef(ctx context.Context, lock *v1beta1.Lock, pkgT } // getPackageDeps returns the dependencies for the given package resource. -func (kc *Client) getPackageDeps(ctx context.Context, node *resource.Resource, lock *v1beta1.Lock, uniqueDeps map[string]struct{}) ([]v1.ObjectReference, error) { +func (kc *Client) getPackageDeps(ctx context.Context, node *resource.Resource, lock *pkgv1beta1.Lock, uniqueDeps map[string]struct{}) ([]v1.ObjectReference, error) { cr, _ := fieldpath.Pave(node.Unstructured.Object).GetString("status.currentRevision") if cr == "" { // we don't have a current package revision, so just return empty deps @@ -293,7 +293,7 @@ func (kc *Client) getPackageDeps(ctx context.Context, node *resource.Resource, l } // find the lock file entry for the current revision - var lp *v1beta1.LockPackage + var lp *pkgv1beta1.LockPackage for i := range lock.Packages { if lock.Packages[i].Name == cr { lp = &lock.Packages[i] diff --git a/cmd/crank/beta/trace/internal/resource/xpkg/client_test.go b/cmd/crank/beta/trace/internal/resource/xpkg/client_test.go index 725c1a620..689aae1b0 100644 --- a/cmd/crank/beta/trace/internal/resource/xpkg/client_test.go +++ b/cmd/crank/beta/trace/internal/resource/xpkg/client_test.go @@ -24,7 +24,7 @@ import ( ) // TODO add more cases, fake client -// Consider testing getPackageDeps instead to cover more +// Consider testing getPackageDeps instead to cover more. 
func TestGetDependencyRef(t *testing.T) { type args struct { pkgType v1beta1.PackageType @@ -145,7 +145,7 @@ func TestGetDependencyRef(t *testing.T) { pkgType: v1beta1.FunctionPackageType, pkg: "example.com/function-1:v1.0.0", client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + MockGet: test.NewMockGetFn(nil, func(_ client.Object) error { return kerrors.NewNotFound(schema.GroupResource{}, "whatever") }), }, @@ -161,7 +161,7 @@ func TestGetDependencyRef(t *testing.T) { want: want{ err: nil, ref: &v1.ObjectReference{ - APIVersion: "pkg.crossplane.io/v1beta1", + APIVersion: "pkg.crossplane.io/v1", Kind: "Function", Name: "function-1", }, diff --git a/cmd/crank/beta/trace/internal/resource/xpkg/xpkg.go b/cmd/crank/beta/trace/internal/resource/xpkg/xpkg.go index 5ab1e7ccb..f589ba707 100644 --- a/cmd/crank/beta/trace/internal/resource/xpkg/xpkg.go +++ b/cmd/crank/beta/trace/internal/resource/xpkg/xpkg.go @@ -22,7 +22,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" pkgv1 "github.com/crossplane/crossplane/apis/pkg/v1" - "github.com/crossplane/crossplane/apis/pkg/v1alpha1" + pkgv1alpha1 "github.com/crossplane/crossplane/apis/pkg/v1alpha1" pkgv1beta1 "github.com/crossplane/crossplane/apis/pkg/v1beta1" ) @@ -54,7 +54,7 @@ const ( func IsPackageType(gk schema.GroupKind) bool { return gk == pkgv1.ProviderGroupVersionKind.GroupKind() || gk == pkgv1.ConfigurationGroupVersionKind.GroupKind() || - gk == pkgv1beta1.FunctionGroupVersionKind.GroupKind() + gk == pkgv1.FunctionGroupVersionKind.GroupKind() } // IsPackageRevisionType returns true if the GroupKind is a Crossplane package @@ -62,12 +62,12 @@ func IsPackageType(gk schema.GroupKind) bool { func IsPackageRevisionType(gk schema.GroupKind) bool { return gk == pkgv1.ConfigurationRevisionGroupVersionKind.GroupKind() || gk == pkgv1.ProviderRevisionGroupVersionKind.GroupKind() || - gk == pkgv1beta1.FunctionRevisionGroupVersionKind.GroupKind() + gk == 
pkgv1.FunctionRevisionGroupVersionKind.GroupKind() } // IsPackageRuntimeConfigType returns true if the GroupKind is a Crossplane runtime // config type. func IsPackageRuntimeConfigType(gk schema.GroupKind) bool { return gk == pkgv1beta1.DeploymentRuntimeConfigGroupVersionKind.GroupKind() || - gk == v1alpha1.ControllerConfigGroupVersionKind.GroupKind() + gk == pkgv1alpha1.ControllerConfigGroupVersionKind.GroupKind() } diff --git a/cmd/crank/beta/trace/internal/resource/xpkg/xpkg_test.go b/cmd/crank/beta/trace/internal/resource/xpkg/xpkg_test.go index a8b2b1b54..460bb201c 100644 --- a/cmd/crank/beta/trace/internal/resource/xpkg/xpkg_test.go +++ b/cmd/crank/beta/trace/internal/resource/xpkg/xpkg_test.go @@ -21,9 +21,9 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" - "github.com/crossplane/crossplane/apis/pkg/v1" - "github.com/crossplane/crossplane/apis/pkg/v1alpha1" - "github.com/crossplane/crossplane/apis/pkg/v1beta1" + pkgv1 "github.com/crossplane/crossplane/apis/pkg/v1" + pkgv1alpha1 "github.com/crossplane/crossplane/apis/pkg/v1alpha1" + pkgv1beta1 "github.com/crossplane/crossplane/apis/pkg/v1beta1" ) func TestIsPackageType(t *testing.T) { @@ -42,7 +42,7 @@ func TestIsPackageType(t *testing.T) { "V1ProviderOK": { reason: "Should return true for a v1 Provider", args: args{ - gk: v1.ProviderGroupVersionKind.GroupKind(), + gk: pkgv1.ProviderGroupVersionKind.GroupKind(), }, want: want{ ok: true, @@ -51,7 +51,7 @@ func TestIsPackageType(t *testing.T) { "V1ConfigurationOK": { reason: "Should return true for a v1 Configuration", args: args{ - gk: v1.ConfigurationGroupVersionKind.GroupKind(), + gk: pkgv1.ConfigurationGroupVersionKind.GroupKind(), }, want: want{ ok: true, @@ -60,7 +60,7 @@ func TestIsPackageType(t *testing.T) { "V1beta1FunctionOK": { reason: "Should return true for a v1beta1 Function", args: args{ - gk: v1beta1.FunctionGroupVersionKind.GroupKind(), + gk: pkgv1.FunctionGroupVersionKind.GroupKind(), }, want: want{ ok: true, @@ -69,7 +69,7 @@ func 
TestIsPackageType(t *testing.T) { "V1ProviderRevisionKO": { reason: "Should return false for a v1 ProviderRevision", args: args{ - gk: v1.ProviderRevisionGroupVersionKind.GroupKind(), + gk: pkgv1.ProviderRevisionGroupVersionKind.GroupKind(), }, want: want{ ok: false, @@ -78,7 +78,7 @@ func TestIsPackageType(t *testing.T) { "V1ConfigurationRevisionKO": { reason: "Should return false for a v1 ConfigurationRevision", args: args{ - gk: v1.ConfigurationRevisionGroupVersionKind.GroupKind(), + gk: pkgv1.ConfigurationRevisionGroupVersionKind.GroupKind(), }, want: want{ ok: false, @@ -87,7 +87,7 @@ func TestIsPackageType(t *testing.T) { "V1beta1FunctionRevisionKO": { reason: "Should return false for a v1beta1 FunctionRevision", args: args{ - gk: v1beta1.FunctionRevisionGroupVersionKind.GroupKind(), + gk: pkgv1.FunctionRevisionGroupVersionKind.GroupKind(), }, want: want{ ok: false, @@ -132,7 +132,7 @@ func TestIsPackageRevisionType(t *testing.T) { "V1ProviderKO": { reason: "Should return false for a v1 Provider", args: args{ - gk: v1.ProviderGroupVersionKind.GroupKind(), + gk: pkgv1.ProviderGroupVersionKind.GroupKind(), }, want: want{ ok: false, @@ -141,7 +141,7 @@ func TestIsPackageRevisionType(t *testing.T) { "V1ConfigurationKO": { reason: "Should return false for a v1 Configuration", args: args{ - gk: v1.ConfigurationGroupVersionKind.GroupKind(), + gk: pkgv1.ConfigurationGroupVersionKind.GroupKind(), }, want: want{ ok: false, @@ -150,7 +150,7 @@ func TestIsPackageRevisionType(t *testing.T) { "V1beta1FunctionKO": { reason: "Should return false for a v1beta1 Function", args: args{ - gk: v1beta1.FunctionGroupVersionKind.GroupKind(), + gk: pkgv1.FunctionGroupVersionKind.GroupKind(), }, want: want{ ok: false, @@ -159,7 +159,7 @@ func TestIsPackageRevisionType(t *testing.T) { "V1ProviderRevisionOK": { reason: "Should return true for a v1 ProviderRevision", args: args{ - gk: v1.ProviderRevisionGroupVersionKind.GroupKind(), + gk: 
pkgv1.ProviderRevisionGroupVersionKind.GroupKind(), }, want: want{ ok: true, @@ -168,7 +168,7 @@ func TestIsPackageRevisionType(t *testing.T) { "V1ConfigurationRevisionOK": { reason: "Should return true for a v1 ConfigurationRevision", args: args{ - gk: v1.ConfigurationRevisionGroupVersionKind.GroupKind(), + gk: pkgv1.ConfigurationRevisionGroupVersionKind.GroupKind(), }, want: want{ ok: true, @@ -177,7 +177,7 @@ func TestIsPackageRevisionType(t *testing.T) { "V1beta1FunctionRevisionOK": { reason: "Should return true for a v1beta1 FunctionRevision", args: args{ - gk: v1beta1.FunctionRevisionGroupVersionKind.GroupKind(), + gk: pkgv1.FunctionRevisionGroupVersionKind.GroupKind(), }, want: want{ ok: true, @@ -221,7 +221,7 @@ func TestIsPackageRuntimeConfigType(t *testing.T) { "V1Alpha1ControllerConfigOK": { reason: "Should return true for a v1alpha1 ControllerConfig", args: args{ - gk: v1alpha1.ControllerConfigGroupVersionKind.GroupKind(), + gk: pkgv1alpha1.ControllerConfigGroupVersionKind.GroupKind(), }, want: want{ ok: true, @@ -230,7 +230,7 @@ func TestIsPackageRuntimeConfigType(t *testing.T) { "V1Beta1DeploymentRuntimeConfigOK": { reason: "Should return true for a v1beta1 DeploymentRuntimeConfig", args: args{ - gk: v1beta1.DeploymentRuntimeConfigGroupVersionKind.GroupKind(), + gk: pkgv1beta1.DeploymentRuntimeConfigGroupVersionKind.GroupKind(), }, want: want{ ok: true, diff --git a/cmd/crank/beta/trace/internal/resource/xrm/client.go b/cmd/crank/beta/trace/internal/resource/xrm/client.go index 336c80860..b143d2724 100644 --- a/cmd/crank/beta/trace/internal/resource/xrm/client.go +++ b/cmd/crank/beta/trace/internal/resource/xrm/client.go @@ -33,11 +33,15 @@ import ( "github.com/crossplane/crossplane/cmd/crank/beta/trace/internal/resource" ) +// defaultConcurrency is the concurrency using which the resource tree if loaded when not explicitly specified. +const defaultConcurrency = 5 + // Client to get a Resource with all its children. 
type Client struct { getConnectionSecrets bool - client client.Client + client client.Client + concurrency int } // ResourceClientOption is a functional option for a Client. @@ -50,12 +54,20 @@ func WithConnectionSecrets(v bool) ResourceClientOption { } } +// WithConcurrency is a functional option that sets the concurrency for the resource load. +func WithConcurrency(n int) ResourceClientOption { + return func(c *Client) { + c.concurrency = n + } +} + // NewClient returns a new Client. func NewClient(in client.Client, opts ...ResourceClientOption) (*Client, error) { uClient := xpunstructured.NewClient(in) c := &Client{ - client: uClient, + client: uClient, + concurrency: defaultConcurrency, } for _, o := range opts { @@ -67,25 +79,20 @@ func NewClient(in client.Client, opts ...ResourceClientOption) (*Client, error) // GetResourceTree returns the requested Crossplane Resource and all its children. func (kc *Client) GetResourceTree(ctx context.Context, root *resource.Resource) (*resource.Resource, error) { - // Set up a FIFO queue to traverse the resource tree breadth first. - queue := []*resource.Resource{root} - - for len(queue) > 0 { - // Pop the first element from the queue. - res := queue[0] - queue = queue[1:] - - refs := getResourceChildrenRefs(res, kc.getConnectionSecrets) - - for i := range refs { - child := resource.GetResource(ctx, kc.client, &refs[i]) + q := newLoader(root, kc, defaultChannelCapacity) + q.load(ctx, kc.concurrency) + return root, nil +} - res.Children = append(res.Children, child) - queue = append(queue, child) - } - } +// loadResource returns the resource for the specified object reference. +func (kc *Client) loadResource(ctx context.Context, ref *v1.ObjectReference) *resource.Resource { + return resource.GetResource(ctx, kc.client, ref) +} - return root, nil +// getResourceChildrenRefs returns the references to the children for the given +// Resource, assuming it's a Crossplane resource, XR or XRC. 
+func (kc *Client) getResourceChildrenRefs(r *resource.Resource) []v1.ObjectReference { + return getResourceChildrenRefs(r, kc.getConnectionSecrets) } // getResourceChildrenRefs returns the references to the children for the given diff --git a/cmd/crank/beta/trace/internal/resource/xrm/loader.go b/cmd/crank/beta/trace/internal/resource/xrm/loader.go new file mode 100644 index 000000000..5580faa5d --- /dev/null +++ b/cmd/crank/beta/trace/internal/resource/xrm/loader.go @@ -0,0 +1,150 @@ +/* +Copyright 2023 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package xrm + +import ( + "context" + "sort" + "sync" + + v1 "k8s.io/api/core/v1" + + "github.com/crossplane/crossplane/cmd/crank/beta/trace/internal/resource" +) + +// defaultChannelCapacity is the buffer size of the processing channel, should be a high value +// so that there is no blocking. Correctness of processing does not depend on the channel capacity. +const defaultChannelCapacity = 1000 + +// workItem maintains the relationship of a resource to be loaded with its parent +// such that the resource that is loaded can be added as a child. +type workItem struct { + parent *resource.Resource + child v1.ObjectReference +} + +// resourceLoader is a delegate that loads resources and returns child resource refs. 
+type resourceLoader interface { + loadResource(ctx context.Context, ref *v1.ObjectReference) *resource.Resource + getResourceChildrenRefs(r *resource.Resource) []v1.ObjectReference +} + +// loader loads resources concurrently. +type loader struct { + root *resource.Resource // the root resource for which the tree is loaded + rl resourceLoader // the resource loader + resourceLock sync.Mutex // lock when updating the children of any resource + processing sync.WaitGroup // "counter" to track requests in flight + ch chan workItem // processing channel + done chan struct{} // done channel, signaled when all resources are loaded +} + +// newLoader creates a loader for the root resource. +func newLoader(root *resource.Resource, rl resourceLoader, channelCapacity int) *loader { + l := &loader{ + rl: rl, + ch: make(chan workItem, channelCapacity), + done: make(chan struct{}), + root: root, + } + return l +} + +// load loads the full resource tree in a concurrent manner. +func (l *loader) load(ctx context.Context, concurrency int) { + // make sure counters are incremented for root child refs before starting concurrent processing + refs := l.rl.getResourceChildrenRefs(l.root) + l.addRefs(l.root, refs) + + // signal the done channel after all items are processed + go func() { + l.processing.Wait() + close(l.done) + }() + + if concurrency < 1 { + concurrency = defaultConcurrency + } + var wg sync.WaitGroup + for range concurrency { + wg.Add(1) + // spin up a worker that processes items from the channel until the done channel is signaled. + go func() { + defer wg.Done() + for { + select { + case <-l.done: + return + case item := <-l.ch: + l.processItem(ctx, item) + } + } + }() + } + wg.Wait() + // order of children loaded for resources is not deterministic because of concurrent processing. + // Sort children explicitly to make this so. 
+ sortRefs(l.root) +} + +func sortRefs(root *resource.Resource) { + for _, child := range root.Children { + sortRefs(child) + } + // this duplicates the sorting logic from internal/controller/apiextensions/composite/composition_functions.go + sort.Slice(root.Children, func(i, j int) bool { + l := root.Children[i].Unstructured + r := root.Children[j].Unstructured + return l.GetAPIVersion()+l.GetKind()+l.GetName() < r.GetAPIVersion()+r.GetKind()+r.GetName() + }) +} + +// addRefs adds work items to the queue. +func (l *loader) addRefs(parent *resource.Resource, refs []v1.ObjectReference) { + // only perform work and spin up a goroutine if references are present. + if len(refs) == 0 { + return + } + // ensure counters are updated synchronously + l.processing.Add(len(refs)) + // free up the current processing routine even if the channel would block. + go func() { + for _, ref := range refs { + l.ch <- workItem{ + parent: parent, + child: ref, + } + } + }() +} + +// processItem processes a single work item in the queue and decrements the in-process counter +// after adding child references. +func (l *loader) processItem(ctx context.Context, item workItem) { + defer l.processing.Done() + res := l.rl.loadResource(ctx, &item.child) + refs := l.rl.getResourceChildrenRefs(res) + l.updateChild(item, res) + l.addRefs(res, refs) +} + +// updateChild adds the supplied child resource to its parent. 
+func (l *loader) updateChild(item workItem, res *resource.Resource) { + l.resourceLock.Lock() + item.parent.Children = append(item.parent.Children, res) + l.resourceLock.Unlock() +} diff --git a/cmd/crank/beta/trace/internal/resource/xrm/loader_test.go b/cmd/crank/beta/trace/internal/resource/xrm/loader_test.go new file mode 100644 index 000000000..da2ced64d --- /dev/null +++ b/cmd/crank/beta/trace/internal/resource/xrm/loader_test.go @@ -0,0 +1,181 @@ +package xrm + +import ( + "context" + "fmt" + "math/rand" + "sync" + "testing" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + "github.com/crossplane/crossplane/cmd/crank/beta/trace/internal/resource" +) + +// simpleGenerator generates a tree of resources for a specific depth and the number of children to +// create at any level. +type simpleGenerator struct { + childDepth int + numItems int + l sync.Mutex // lock for accessing the depth map + depthMap map[string]int // tracks resource names and their depth so that we can stop when the desired depth is reached. 
+} + +func newSimpleGenerator(childDepth, numItems int) *simpleGenerator { + return &simpleGenerator{ + childDepth: childDepth, + numItems: numItems, + depthMap: map[string]int{}, + } +} + +func (d *simpleGenerator) createResource(apiVersion, kind, name string) *resource.Resource { + obj := map[string]any{ + "apiVersion": apiVersion, + "kind": kind, + "metadata": map[string]any{ + "name": name, + }, + } + return &resource.Resource{Unstructured: unstructured.Unstructured{Object: obj}} +} + +func (d *simpleGenerator) trackResourceDepth(name string, depth int) { + d.l.Lock() + defer d.l.Unlock() + d.depthMap[name] = depth +} + +func (d *simpleGenerator) createRefAtDepth(depth int) v1.ObjectReference { + prefix := "comp-res" + if depth == d.childDepth { + prefix = "managed-res" + } + name := fmt.Sprintf("%s-%d-%d", prefix, rand.Int(), depth) + d.trackResourceDepth(name, depth) + return v1.ObjectReference{ + Kind: fmt.Sprintf("Depth%d", depth), + Name: name, + APIVersion: "example.com/v1", + } +} + +func (d *simpleGenerator) createResourceFromRef(ref *v1.ObjectReference) *resource.Resource { + return d.createResource(ref.APIVersion, ref.Kind, ref.Name) +} + +func (d *simpleGenerator) loadResource(_ context.Context, ref *v1.ObjectReference) *resource.Resource { + return d.createResourceFromRef(ref) +} + +func (d *simpleGenerator) depthFromResource(res *resource.Resource) int { + d.l.Lock() + defer d.l.Unlock() + return d.depthMap[res.Unstructured.GetName()] +} + +func (d *simpleGenerator) getResourceChildrenRefs(r *resource.Resource) []v1.ObjectReference { + depth := d.depthFromResource(r) + if depth == d.childDepth { + return nil + } + ret := make([]v1.ObjectReference, 0, d.numItems) + for range d.numItems { + ret = append(ret, d.createRefAtDepth(depth+1)) + } + return ret +} + +var _ resourceLoader = &simpleGenerator{} + +func countItems(root *resource.Resource) int { + ret := 1 + for _, child := range root.Children { + ret += countItems(child) + } + return ret +} + 
+func TestLoader(t *testing.T) { + type want struct { + expectedResources int + } + type args struct { + childDepth int + numItems int + channelCapacity int + concurrency int + } + tests := map[string]struct { + reason string + args args + want want + }{ + "Basic": { + reason: "simple test with default concurrency", + args: args{ + childDepth: 3, + numItems: 3, + }, + want: want{ + expectedResources: 1 + 3 + 9 + 27, + }, + }, + "BlockingBuffer": { + reason: "in-process resources greater than channel buffer, causing blocking", + args: args{ + channelCapacity: 1, + concurrency: 1, + childDepth: 3, + numItems: 10, + }, + want: want{ + expectedResources: 1 + 10 + 100 + 1000, + }, + }, + "NoRootChildren": { + reason: "top-level resource has no children", + args: args{ + childDepth: 0, + numItems: 0, + }, + want: want{ + expectedResources: 1, + }, + }, + "BadConcurrency": { + reason: "invalid concurrency is adjusted to be valid", + args: args{ + concurrency: -1, + childDepth: 3, + numItems: 3, + }, + want: want{ + expectedResources: 1 + 3 + 9 + 27, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + channelCapacity := defaultChannelCapacity + if test.args.channelCapacity > 0 { + channelCapacity = test.args.channelCapacity + } + concurrency := defaultConcurrency + if test.args.concurrency != 0 { + concurrency = test.args.concurrency + } + sg := newSimpleGenerator(test.args.childDepth, test.args.numItems) + rootRef := sg.createRefAtDepth(0) + root := sg.createResourceFromRef(&rootRef) + l := newLoader(root, sg, channelCapacity) + l.load(context.Background(), concurrency) + n := countItems(root) + if test.want.expectedResources != n { + t.Errorf("resource count mismatch: want %d, got %d", test.want.expectedResources, n) + } + }) + } +} diff --git a/cmd/crank/beta/trace/trace.go b/cmd/crank/beta/trace/trace.go index f525e87ed..924c05934 100644 --- a/cmd/crank/beta/trace/trace.go +++ b/cmd/crank/beta/trace/trace.go @@ -28,7 +28,7 @@ import ( 
"k8s.io/client-go/discovery/cached/memory" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/restmapper" - ctrl "sigs.k8s.io/controller-runtime" + "k8s.io/client-go/tools/clientcmd" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/crossplane/crossplane-runtime/pkg/errors" @@ -45,6 +45,7 @@ const ( errGetResource = "cannot get requested resource" errCliOutput = "cannot print output" errKubeConfig = "failed to get kubeconfig" + errKubeNamespace = "failed to get namespace from kubeconfig" errInitKubeClient = "cannot init kubeclient" errGetDiscoveryClient = "cannot get discovery client" errGetMapping = "cannot get mapping for resource" @@ -58,16 +59,17 @@ const ( // Cmd builds the trace tree for a Crossplane resource. type Cmd struct { Resource string `arg:"" help:"Kind of the Crossplane resource, accepts the 'TYPE[.VERSION][.GROUP][/NAME]' format."` - Name string `arg:"" optional:"" help:"Name of the Crossplane resource, can be passed as part of the resource too."` + Name string `arg:"" help:"Name of the Crossplane resource, can be passed as part of the resource too." optional:""` // TODO(phisco): add support for all the usual kubectl flags; configFlags := genericclioptions.NewConfigFlags(true).AddFlags(...) - // TODO(phisco): move to namespace defaulting to "" and use the current context's namespace - Namespace string `short:"n" name:"namespace" help:"Namespace of the resource." default:"default"` - Output string `short:"o" name:"output" help:"Output format. One of: default, wide, json, dot." enum:"default,wide,json,dot" default:"default"` - ShowConnectionSecrets bool `short:"s" name:"show-connection-secrets" help:"Show connection secrets in the output."` - ShowPackageDependencies string `name:"show-package-dependencies" help:"Show package dependencies in the output. One of: unique, all, none." enum:"unique,all,none" default:"unique"` - ShowPackageRevisions string `name:"show-package-revisions" help:"Show package revisions in the output. 
One of: active, all, none." enum:"active,all,none" default:"active"` - ShowPackageRuntimeConfigs bool `name:"show-package-runtime-configs" help:"Show package runtime configs in the output." default:"false"` + Context string `default:"" help:"Kubernetes context." name:"context" short:"c"` + Namespace string `default:"" help:"Namespace of the resource." name:"namespace" short:"n"` + Output string `default:"default" enum:"default,wide,json,dot" help:"Output format. One of: default, wide, json, dot." name:"output" short:"o"` + ShowConnectionSecrets bool `help:"Show connection secrets in the output." name:"show-connection-secrets" short:"s"` + ShowPackageDependencies string `default:"unique" enum:"unique,all,none" help:"Show package dependencies in the output. One of: unique, all, none." name:"show-package-dependencies"` + ShowPackageRevisions string `default:"active" enum:"active,all,none" help:"Show package revisions in the output. One of: active, all, none." name:"show-package-revisions"` + ShowPackageRuntimeConfigs bool `default:"false" help:"Show package runtime configs in the output." name:"show-package-runtime-configs"` + Concurrency int `default:"5" help:"load concurrency" name:"concurrency"` } // Help returns help message for the trace command. @@ -84,7 +86,8 @@ Examples: # Trace a MyKind resource (mykinds.example.org/v1alpha1) named 'my-res' in the namespace 'my-ns' crossplane beta trace mykind my-res -n my-ns - # Output wide format, showing full errors and condition messages + # Output wide format, showing full errors and condition messages, and other useful info + # depending on the target type, e.g. composed resources names for composite resources or image used for packages crossplane beta trace mykind my-res -n my-ns -o wide # Show connection secrets in the output @@ -102,7 +105,7 @@ Examples: } // Run runs the trace command. 
-func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { //nolint:gocyclo // TODO(phisco): refactor +func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { ctx := context.Background() logger = logger.WithValues("Resource", c.Resource, "Name", c.Name) @@ -113,10 +116,28 @@ func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { //nolint:gocyc } logger.Debug("Built printer", "output", c.Output) - kubeconfig, err := ctrl.GetConfig() + clientconfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + clientcmd.NewDefaultClientConfigLoadingRules(), + &clientcmd.ConfigOverrides{CurrentContext: c.Context}, + ) + + kubeconfig, err := clientconfig.ClientConfig() if err != nil { return errors.Wrap(err, errKubeConfig) } + + // NOTE(phisco): We used to get them set as part of + // https://github.com/kubernetes-sigs/controller-runtime/blob/2e9781e9fc6054387cf0901c70db56f0b0a63083/pkg/client/config/config.go#L96, + // this new approach doesn't set them, so we need to set them here to avoid + // being utterly slow. + // TODO(phisco): make this configurable. 
+ if kubeconfig.QPS == 0 { + kubeconfig.QPS = 20 + } + if kubeconfig.Burst == 0 { + kubeconfig.Burst = 30 + } + logger.Debug("Found kubeconfig") client, err := client.New(kubeconfig, client.Options{ @@ -155,10 +176,18 @@ func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { //nolint:gocyc APIVersion: mapping.GroupVersionKind.GroupVersion().String(), Name: name, } - if mapping.Scope.Name() == meta.RESTScopeNameNamespace && c.Namespace != "" { - logger.Debug("Requested resource is namespaced", "namespace", c.Namespace) - rootRef.Namespace = c.Namespace + if mapping.Scope.Name() == meta.RESTScopeNameNamespace { + namespace := c.Namespace + if namespace == "" { + namespace, _, err = clientconfig.Namespace() + if err != nil { + return errors.Wrap(err, errKubeNamespace) + } + } + logger.Debug("Requested resource is namespaced", "namespace", namespace) + rootRef.Namespace = namespace } + logger.Debug("Getting resource tree", "rootRef", rootRef.String()) // Get client for k8s package root := resource.GetResource(ctx, client, rootRef) @@ -180,7 +209,10 @@ func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { //nolint:gocyc } default: logger.Debug("Requested resource is not a package, assumed to be an XR, XRC or MR") - treeClient, err = xrm.NewClient(client, xrm.WithConnectionSecrets(c.ShowConnectionSecrets)) + treeClient, err = xrm.NewClient(client, + xrm.WithConnectionSecrets(c.ShowConnectionSecrets), + xrm.WithConcurrency(c.Concurrency), + ) if err != nil { return errors.Wrap(err, errInitKubeClient) } diff --git a/cmd/crank/beta/validate/cache.go b/cmd/crank/beta/validate/cache.go index eda26ff84..bf317cd5f 100644 --- a/cmd/crank/beta/validate/cache.go +++ b/cmd/crank/beta/validate/cache.go @@ -27,22 +27,22 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/errors" ) -// Cache defines an interface for caching schemas +// Cache defines an interface for caching schemas. 
type Cache interface { - Store([][]byte, string) error + Store(schemas [][]byte, path string) error Flush() error Init() error Load() ([]*unstructured.Unstructured, error) - Exists(string) (string, error) + Exists(image string) (string, error) } -// LocalCache implements the Cache interface +// LocalCache implements the Cache interface. type LocalCache struct { fs afero.Fs cacheDir string } -// Store stores the schemas in the directory +// Store stores the schemas in the directory. func (c *LocalCache) Store(schemas [][]byte, path string) error { if err := c.fs.MkdirAll(path, os.ModePerm); err != nil { return errors.Wrapf(err, "cannot create directory %s", path) @@ -68,7 +68,7 @@ func (c *LocalCache) Store(schemas [][]byte, path string) error { return nil } -// Init creates the cache directory if it doesn't exist +// Init creates the cache directory if it doesn't exist. func (c *LocalCache) Init() error { if _, err := c.fs.Stat(c.cacheDir); os.IsNotExist(err) { if err := c.fs.MkdirAll(c.cacheDir, os.ModePerm); err != nil { @@ -81,12 +81,12 @@ func (c *LocalCache) Init() error { return nil } -// Flush removes the cache directory +// Flush removes the cache directory. func (c *LocalCache) Flush() error { return c.fs.RemoveAll(c.cacheDir) } -// Load loads the schemas from the cache directory +// Load loads the schemas from the cache directory. func (c *LocalCache) Load() ([]*unstructured.Unstructured, error) { loader, err := NewLoader(c.cacheDir) if err != nil { @@ -101,7 +101,7 @@ func (c *LocalCache) Load() ([]*unstructured.Unstructured, error) { return schemas, nil } -// Exists checks if the cache contains the image and returns the path if it doesn't exist +// Exists checks if the cache contains the image and returns the path if it doesn't exist. 
func (c *LocalCache) Exists(image string) (string, error) { fName := strings.ReplaceAll(image, ":", "@") path := filepath.Join(c.cacheDir, fName) diff --git a/cmd/crank/beta/validate/cmd.go b/cmd/crank/beta/validate/cmd.go index eb7960c6f..642ff7826 100644 --- a/cmd/crank/beta/validate/cmd.go +++ b/cmd/crank/beta/validate/cmd.go @@ -20,6 +20,7 @@ package validate import ( "os" "path/filepath" + "strings" "github.com/alecthomas/kong" "github.com/spf13/afero" @@ -35,7 +36,7 @@ type Cmd struct { Resources string `arg:"" help:"Resources source which can be a file, directory, or '-' for standard input."` // Flags. Keep them in alphabetical order. - CacheDir string `help:"Absolute path to the cache directory where downloaded schemas are stored." default:".crossplane/cache"` + CacheDir string `default:"~/.crossplane/cache" help:"Absolute path to the cache directory where downloaded schemas are stored."` CleanCache bool `help:"Clean the cache directory before downloading package schemas."` SkipSuccessResults bool `help:"Skip printing success results."` @@ -46,11 +47,11 @@ type Cmd struct { func (c *Cmd) Help() string { return ` This command validates the provided Crossplane resources against the schemas of the provided extensions like XRDs, -CRDs, providers, and configurations. The output of the "crossplane beta render" command can be +CRDs, providers, and configurations. The output of the "crossplane render" command can be piped to this validate command in order to rapidly validate on the outputs of the composition development experience. If providers or configurations are provided as extensions, they will be downloaded and loaded as CRDs before performing -validation. If the cache directory is not provided, it will default to ".crossplane/cache" in the current workspace. +validation. If the cache directory is not provided, it will default to "~/.crossplane/cache". Cache directory can be cleaned before downloading schemas by setting the "clean-cache" flag. 
All validation is performed offline locally using the Kubernetes API server's validation library, so it does not require @@ -66,7 +67,7 @@ Examples: crossplane beta validate extensionsDir/ resourceDir/ --skip-success-results # Validate the output of the render command against the extensions in the extensionsDir folder - crossplane beta render xr.yaml composition.yaml func.yaml --include-full-xr | crossplane beta validate extensionsDir/ - + crossplane render xr.yaml composition.yaml func.yaml --include-full-xr | crossplane beta validate extensionsDir/ - # Validate all resources in the resourceDir folder against the extensions in the extensionsDir folder using provided # cache directory and clean the cache directory before downloading schemas @@ -81,7 +82,7 @@ func (c *Cmd) AfterApply() error { } // Run validate. -func (c *Cmd) Run(k *kong.Context, _ logging.Logger) error { //nolint:gocyclo // stdin check makes it over the top +func (c *Cmd) Run(k *kong.Context, _ logging.Logger) error { if c.Resources == "-" && c.Extensions == "-" { return errors.New("cannot use stdin for both extensions and resources") } @@ -108,13 +109,9 @@ func (c *Cmd) Run(k *kong.Context, _ logging.Logger) error { //nolint:gocyclo // return errors.Wrapf(err, "cannot load resources from %q", c.Resources) } - // Update default cache directory to absolute path based on the current working directory - if c.CacheDir == defaultCacheDir { - currentPath, err := os.Getwd() - if err != nil { - return errors.Wrapf(err, "cannot get current path") - } - c.CacheDir = filepath.Join(currentPath, c.CacheDir) + if strings.HasPrefix(c.CacheDir, "~/") { + homeDir, _ := os.UserHomeDir() + c.CacheDir = filepath.Join(homeDir, c.CacheDir[2:]) } m := NewManager(c.CacheDir, c.fs, k.Stdout) diff --git a/cmd/crank/beta/validate/image.go b/cmd/crank/beta/validate/image.go index 0c8fc2cf4..8cc0e50e1 100644 --- a/cmd/crank/beta/validate/image.go +++ b/cmd/crank/beta/validate/image.go @@ -18,8 +18,10 @@ package validate 
import ( "fmt" + "sort" "strings" + "github.com/Masterminds/semver" "github.com/google/go-containerregistry/pkg/crane" conregv1 "github.com/google/go-containerregistry/pkg/v1" "k8s.io/apimachinery/pkg/util/yaml" @@ -27,18 +29,25 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/errors" ) -// ImageFetcher defines an interface for fetching images +// ImageFetcher defines an interface for fetching images. type ImageFetcher interface { FetchBaseLayer(image string) (*conregv1.Layer, error) } -// Fetcher implements the ImageFetcher interface +// Fetcher implements the ImageFetcher interface. type Fetcher struct{} -// FetchBaseLayer fetches the base layer of the image which contains the 'package.yaml' file +// FetchBaseLayer fetches the base layer of the image which contains the 'package.yaml' file. func (f *Fetcher) FetchBaseLayer(image string) (*conregv1.Layer, error) { - if strings.Contains(image, "sha") { // Strip the digest before fetching the image - image = strings.Split(image, "@")[0] + if strings.Contains(image, "@") { + // Strip the digest before fetching the image + image = strings.SplitN(image, "@", 2)[0] + } else if strings.Contains(image, ":") { + var err error + image, err = findImageTagForVersionConstraint(image) + if err != nil { + return nil, errors.Wrapf(err, "cannot find image tag for version constraint") + } } cBytes, err := crane.Config(image) @@ -74,6 +83,63 @@ func (f *Fetcher) FetchBaseLayer(image string) (*conregv1.Layer, error) { return &ll, nil } +func findImageTagForVersionConstraint(image string) (string, error) { + // Separate the image base and the image tag + parts := strings.Split(image, ":") + lastPart := len(parts) - 1 + imageBase := strings.Join(parts[0:lastPart], ":") + imageTag := parts[lastPart] + + // Check if the tag is a constraint + isConstraint := true + c, err := semver.NewConstraint(imageTag) + if err != nil { + isConstraint = false + } + + // Return original image if no constraint was detected + if !isConstraint { + 
 return image, nil + } + + // Fetch all image tags + tags, err := crane.ListTags(imageBase) + if err != nil { + return "", errors.Wrapf(err, "cannot fetch tags for the image %s", imageBase) + } + + // Convert tags to semver versions + vs := []*semver.Version{} + for _, r := range tags { + v, err := semver.NewVersion(r) + if err != nil { + // We skip any tags that are not valid semantic versions + continue + } + vs = append(vs, v) + } + + // Sort all versions and find the last version compliant with the constraint + sort.Sort(sort.Reverse(semver.Collection(vs))) + var addVer string + for _, v := range vs { + if c.Check(v) { + addVer = v.Original() + + break + } + } + + if addVer == "" { + return "", errors.Errorf("cannot find any tag compliant with the constraint %s", imageTag) + } + + // Compose new complete image string if any compliant version was found + image = fmt.Sprintf("%s:%s", imageBase, addVer) + + return image, nil +} + func extractPackageContent(layer conregv1.Layer) ([][]byte, []byte, error) { rc, err := layer.Uncompressed() if err != nil { diff --git a/cmd/crank/beta/validate/image_test.go b/cmd/crank/beta/validate/image_test.go new file mode 100644 index 000000000..406478b0f --- /dev/null +++ b/cmd/crank/beta/validate/image_test.go @@ -0,0 +1,113 @@ +/* +Copyright 2024 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package validate + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "testing" +) + +func TestFindImageTagForVersionConstraint(t *testing.T) { + repoName := "ubuntu" + responseTags := []byte(`{"tags":["1.2.3","4.5.6"]}`) + cases := map[string]struct { + responseBody []byte + host string + constraint string + expectedImage string + expectError bool + }{ + "NoConstraint": { + responseBody: responseTags, + constraint: "1.2.3", + expectedImage: "ubuntu:1.2.3", + }, + "Constraint": { + responseBody: responseTags, + constraint: ">=1.2.3", + expectedImage: "ubuntu:4.5.6", + }, + "ConstraintV": { + responseBody: responseTags, + constraint: ">=v1.2.3", + expectedImage: "ubuntu:4.5.6", + }, + "ConstraintPreRelease": { + responseBody: responseTags, + constraint: ">v4.5.6-rc.0.100.g658deda0.dirty", + expectedImage: "ubuntu:4.5.6", + }, + "CannotFetchTags": { + responseBody: responseTags, + host: "wrong.host", + constraint: ">=4.5.6", + expectError: true, + }, + "NoMatchingTag": { + responseBody: responseTags, + constraint: ">4.5.6", + expectError: true, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + tagsPath := fmt.Sprintf("/v2/%s/tags/list", repoName) + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/v2/": + w.WriteHeader(http.StatusOK) + case tagsPath: + if r.Method != http.MethodGet { + t.Errorf("Method; got %v, want %v", r.Method, http.MethodGet) + } + + w.Write(tc.responseBody) + default: + t.Fatalf("Unexpected path: %v", r.URL.Path) + } + })) + defer server.Close() + + u, err := url.Parse(server.URL) + if err != nil { + t.Fatalf("url.Parse(%v) = %v", server.URL, err) + } + + host := u.Host + if tc.host != "" { + host = tc.host + } + + image, err := findImageTagForVersionConstraint(fmt.Sprintf("%s/%s:%s", host, repoName, tc.constraint)) + + expectedImage := "" + if !tc.expectError { + expectedImage = fmt.Sprintf("%s/%s", host, 
tc.expectedImage) + } + + if tc.expectError && err == nil { + t.Errorf("[%s] expected: error\n", name) + } else if expectedImage != image { + t.Errorf("[%s] expected: %s, got: %s\n", name, expectedImage, image) + } + }) + } +} diff --git a/cmd/crank/beta/validate/loader.go b/cmd/crank/beta/validate/loader.go index 96741da8a..6b610779b 100644 --- a/cmd/crank/beta/validate/loader.go +++ b/cmd/crank/beta/validate/loader.go @@ -28,12 +28,12 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/errors" ) -// Loader interface defines the contract for different input sources +// Loader interface defines the contract for different input sources. type Loader interface { Load() ([]*unstructured.Unstructured, error) } -// NewLoader returns a Loader based on the input source +// NewLoader returns a Loader based on the input source. func NewLoader(input string) (Loader, error) { if input == "-" { return &StdinLoader{}, nil @@ -51,10 +51,10 @@ func NewLoader(input string) (Loader, error) { return &FileLoader{path: input}, nil } -// StdinLoader implements the Loader interface for reading from stdin +// StdinLoader implements the Loader interface for reading from stdin. type StdinLoader struct{} -// Load reads the contents from stdin +// Load reads the contents from stdin. func (s *StdinLoader) Load() ([]*unstructured.Unstructured, error) { stream, err := load(os.Stdin) if err != nil { @@ -64,12 +64,12 @@ func (s *StdinLoader) Load() ([]*unstructured.Unstructured, error) { return streamToUnstructured(stream) } -// FileLoader implements the Loader interface for reading from a file and converting input to unstructured objects +// FileLoader implements the Loader interface for reading from a file and converting input to unstructured objects. type FileLoader struct { path string } -// Load reads the contents from a file +// Load reads the contents from a file. 
func (f *FileLoader) Load() ([]*unstructured.Unstructured, error) { stream, err := readFile(f.path) if err != nil { @@ -79,12 +79,12 @@ func (f *FileLoader) Load() ([]*unstructured.Unstructured, error) { return streamToUnstructured(stream) } -// FolderLoader implements the Loader interface for reading from a folder +// FolderLoader implements the Loader interface for reading from a folder. type FolderLoader struct { path string } -// Load reads the contents from all files in a folder +// Load reads the contents from all files in a folder. func (f *FolderLoader) Load() ([]*unstructured.Unstructured, error) { var stream [][]byte err := filepath.Walk(f.path, func(path string, info os.FileInfo, err error) error { @@ -114,7 +114,6 @@ func isYamlFile(info os.FileInfo) bool { func readFile(path string) ([][]byte, error) { f, err := os.Open(filepath.Clean(path)) - if err != nil { return nil, errors.Wrap(err, "cannot open file") } diff --git a/cmd/crank/beta/validate/manager.go b/cmd/crank/beta/validate/manager.go index 2bab67ed1..5dae3ce7a 100644 --- a/cmd/crank/beta/validate/manager.go +++ b/cmd/crank/beta/validate/manager.go @@ -21,7 +21,6 @@ import ( "io" "github.com/spf13/afero" - apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" @@ -36,7 +35,6 @@ import ( ) const ( - defaultCacheDir = ".crossplane/cache" packageFileName = "package.yaml" baseLayerLabel = "base" @@ -44,18 +42,18 @@ const ( imageFmt = "%s:%s" ) -// Manager defines a Manager for preparing Crossplane packages for validation +// Manager defines a Manager for preparing Crossplane packages for validation. 
type Manager struct { fetcher ImageFetcher cache Cache writer io.Writer - crds []*apiextv1.CustomResourceDefinition - deps map[string]bool // One level dependency images - confs map[string]bool // Configuration images + crds []*extv1.CustomResourceDefinition + deps map[string]bool // Dependency images + confs map[string]*metav1.Configuration // Configuration images } -// NewManager returns a new Manager +// NewManager returns a new Manager. func NewManager(cacheDir string, fs afero.Fs, w io.Writer) *Manager { m := &Manager{} @@ -66,15 +64,15 @@ func NewManager(cacheDir string, fs afero.Fs, w io.Writer) *Manager { m.fetcher = &Fetcher{} m.writer = w - m.crds = make([]*apiextv1.CustomResourceDefinition, 0) + m.crds = make([]*extv1.CustomResourceDefinition, 0) m.deps = make(map[string]bool) - m.confs = make(map[string]bool) + m.confs = make(map[string]*metav1.Configuration) return m } -// PrepExtensions converts the unstructured XRDs/CRDs to CRDs and extract package images to add as a dependency -func (m *Manager) PrepExtensions(extensions []*unstructured.Unstructured) error { //nolint:gocyclo // the function itself is not that complex, it just has different cases +// PrepExtensions converts the unstructured XRDs/CRDs to CRDs and extract package images to add as a dependency. 
+func (m *Manager) PrepExtensions(extensions []*unstructured.Unstructured) error { //nolint:gocognit // the function itself is not that complex, it just has different cases for _, e := range extensions { switch e.GroupVersionKind().GroupKind() { case schema.GroupKind{Group: "apiextensions.k8s.io", Kind: "CustomResourceDefinition"}: @@ -132,7 +130,20 @@ func (m *Manager) PrepExtensions(extensions []*unstructured.Unstructured) error return errors.Wrapf(err, "cannot get package image") } - m.confs[image] = true + m.confs[image] = nil + + case schema.GroupKind{Group: "meta.pkg.crossplane.io", Kind: "Configuration"}: + meta, err := e.MarshalJSON() + if err != nil { + return errors.Wrap(err, "cannot marshal configuration to JSON") + } + + cfg := &metav1.Configuration{} + if err := yaml.Unmarshal(meta, cfg); err != nil { + return errors.Wrapf(err, "cannot unmarshal configuration YAML") + } + + m.confs[cfg.Name] = cfg default: continue @@ -142,7 +153,7 @@ func (m *Manager) PrepExtensions(extensions []*unstructured.Unstructured) error return nil } -// CacheAndLoad finds and caches dependencies and loads them as CRDs +// CacheAndLoad finds and caches dependencies and loads them as CRDs. 
func (m *Manager) CacheAndLoad(cleanCache bool) error { if cleanCache { if err := m.cache.Flush(); err != nil { @@ -154,7 +165,7 @@ func (m *Manager) CacheAndLoad(cleanCache bool) error { return errors.Wrapf(err, "cannot initialize cache directory") } - if err := m.addDependencies(); err != nil { + if err := m.addDependencies(m.confs); err != nil { return errors.Wrapf(err, "cannot add package dependencies") } @@ -170,39 +181,56 @@ func (m *Manager) CacheAndLoad(cleanCache bool) error { return m.PrepExtensions(schemas) } -func (m *Manager) addDependencies() error { - for image := range m.confs { - m.deps[image] = true // we need to download the configuration package for the XRDs +func (m *Manager) addDependencies(confs map[string]*metav1.Configuration) error { + if len(confs) == 0 { + return nil + } - layer, err := m.fetcher.FetchBaseLayer(image) - if err != nil { - return errors.Wrapf(err, "cannot download package %s", image) - } + deepConfs := make(map[string]*metav1.Configuration) + for image := range confs { + cfg := m.confs[image] - _, meta, err := extractPackageContent(*layer) - if err != nil { - return errors.Wrapf(err, "cannot extract package file and meta") - } + if cfg == nil { + m.deps[image] = true // we need to download the configuration package for the XRDs + + layer, err := m.fetcher.FetchBaseLayer(image) + if err != nil { + return errors.Wrapf(err, "cannot download package %s", image) + } - cfg := &metav1.Configuration{} - if err := yaml.Unmarshal(meta, cfg); err != nil { - return errors.Wrapf(err, "cannot unmarshal configuration YAML") + _, meta, err := extractPackageContent(*layer) + if err != nil { + return errors.Wrapf(err, "cannot extract package file and meta") + } + if err := yaml.Unmarshal(meta, &cfg); err != nil { + return errors.Wrapf(err, "cannot unmarshal configuration YAML") + } + m.confs[image] = cfg // update the configuration } deps := cfg.Spec.MetaSpec.DependsOn for _, dep := range deps { image := "" - if dep.Configuration != nil { + 
if dep.Configuration != nil { //nolint:gocritic // switch is not suitable here image = *dep.Configuration } else if dep.Provider != nil { image = *dep.Provider + } else if dep.Function != nil { + image = *dep.Function + } + if len(image) > 0 { + image = fmt.Sprintf(imageFmt, image, dep.Version) + m.deps[image] = true + + if _, ok := m.confs[image]; !ok && dep.Configuration != nil { + deepConfs[image] = nil + m.confs[image] = nil + } } - image = fmt.Sprintf(imageFmt, image, dep.Version) - m.deps[image] = true } } - return nil + return m.addDependencies(deepConfs) } func (m *Manager) cacheDependencies() error { diff --git a/cmd/crank/beta/validate/manager_test.go b/cmd/crank/beta/validate/manager_test.go new file mode 100644 index 000000000..5b8ac70d1 --- /dev/null +++ b/cmd/crank/beta/validate/manager_test.go @@ -0,0 +1,356 @@ +/* +Copyright 2024 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validate + +import ( + "bytes" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + conregv1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/static" + "github.com/google/go-containerregistry/pkg/v1/types" + "github.com/spf13/afero" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + "github.com/crossplane/crossplane-runtime/pkg/test" +) + +var ( + // config-pkg:v1.3.0. 
+ configPkg = []byte(`apiVersion: meta.pkg.crossplane.io/v1alpha1 +kind: Configuration +metadata: + name: config-pkg +spec: + dependsOn: + - provider: provider-dep-1 + version: "v1.3.0" +--- + +`) + + // provider-dep-1:v1.3.0. + providerYaml = []byte(`apiVersion: meta.pkg.crossplane.io/v1 +kind: Provider +metadata: + name: provider-dep-1 +--- + +`) + + // function-dep-1:v1.3.0. + funcYaml = []byte(`apiVersion: meta.pkg.crossplane.io/v1beta1 +kind: Function +metadata: + name: function-dep-1 +--- + +`) + + // config-dep-1:v1.3.0. + configDep1Yaml = []byte(`apiVersion: meta.pkg.crossplane.io/v1alpha1 +kind: Configuration +metadata: + name: config-dep-1 +spec: + dependsOn: + - configuration: config-dep-2 + version: "v1.3.0" +--- + +`) + + // config-dep-2:v1.3.0. + configDep2Yaml = []byte(`apiVersion: meta.pkg.crossplane.io/v1alpha1 +kind: Configuration +metadata: + name: config-dep-2 +spec: + dependsOn: + - provider: provider-dep-1 + version: "v1.3.0" + - function: function-dep-1 + version: "v1.3.0" +--- + +`) +) + +func TestConfigurationTypeSupport(t *testing.T) { + confpkg := static.NewLayer(configPkg, types.OCILayer) + pd := static.NewLayer(providerYaml, types.OCILayer) + fd := static.NewLayer(funcYaml, types.OCILayer) + + fetchMockFunc := func(image string) (*conregv1.Layer, error) { + switch image { + case "config-pkg:v1.3.0": + return &confpkg, nil + case "provider-dep-1:v1.3.0": + return &pd, nil + case "function-dep-1:v1.3.0": + return &fd, nil + default: + return nil, fmt.Errorf("unknown image: %s", image) + } + } + + type args struct { + extensions []*unstructured.Unstructured + fetchMock func(image string) (*conregv1.Layer, error) + } + type want struct { + err error + confs int + deps int + } + cases := map[string]struct { + reason string + args args + want want + }{ + "SuccessfulConfigPkg": { + // config-pkg + // └─►provider-dep-1 + reason: "All dependencies should be successfully added from Configuration.pkg", + args: args{ + extensions: 
[]*unstructured.Unstructured{ + { + Object: map[string]interface{}{ + "apiVersion": "pkg.crossplane.io/v1alpha1", + "kind": "Configuration", + "metadata": map[string]interface{}{ + "name": "config-pkg", + }, + "spec": map[string]interface{}{ + "package": "config-pkg:v1.3.0", + }, + }, + }, + }, + fetchMock: fetchMockFunc, + }, + want: want{ + err: nil, + confs: 1, // Configuration.pkg from remote + deps: 2, // 1 provider, 1 Configuration.pkg dependency + }, + }, + "SuccessfulConfigMeta": { + // config-meta + // └─►function-dep-1 + reason: "All dependencies should be successfully added from Configuration.meta", + args: args{ + extensions: []*unstructured.Unstructured{ + { + Object: map[string]interface{}{ + "apiVersion": "meta.pkg.crossplane.io/v1alpha1", + "kind": "Configuration", + "metadata": map[string]interface{}{ + "name": "config-meta", + }, + "spec": map[string]interface{}{ + "dependsOn": []map[string]interface{}{ + { + "function": "function-dep-1", + "version": "v1.3.0", + }, + }, + }, + }, + }, + }, + fetchMock: fetchMockFunc, + }, + want: want{ + err: nil, + confs: 1, // Configuration.meta + deps: 1, // Not adding Configuration.meta itself to not send it to cacheDependencies() for download + }, + }, + "SuccessfulConfigMetaAndPkg": { + // config-meta + // └─►function-dep-1 + // config-pkg + // └─►provider-dep-1 + reason: "All dependencies should be successfully added from both Configuration.meta and Configuration.pkg", + args: args{ + extensions: []*unstructured.Unstructured{ + { + Object: map[string]interface{}{ + "apiVersion": "meta.pkg.crossplane.io/v1alpha1", + "kind": "Configuration", + "metadata": map[string]interface{}{ + "name": "config-meta", + }, + "spec": map[string]interface{}{ + "dependsOn": []map[string]interface{}{ + { + "function": "function-dep-1", + "version": "v1.3.0", + }, + }, + }, + }, + }, + { + Object: map[string]interface{}{ + "apiVersion": "pkg.crossplane.io/v1alpha1", + "kind": "Configuration", + "metadata": 
map[string]interface{}{ + "name": "config-pkg", + }, + "spec": map[string]interface{}{ + "package": "config-pkg:v1.3.0", + }, + }, + }, + }, + fetchMock: fetchMockFunc, + }, + want: want{ + err: nil, + confs: 2, // Configuration.meta and Configuration.pkg + deps: 3, // 1 Configuration.pkg, 1 provider, 1 function + }, + }, + } + for name, tc := range cases { + fs := afero.NewMemMapFs() + w := &bytes.Buffer{} + + m := NewManager("", fs, w) + t.Run(name, func(t *testing.T) { + m.fetcher = &MockFetcher{tc.args.fetchMock} + err := m.PrepExtensions(tc.args.extensions) + + if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { + t.Errorf("\n%s\nPrepExtensions(...): -want error, +got error:\n%s", tc.reason, diff) + } + + err = m.addDependencies(m.confs) + if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { + t.Errorf("\n%s\naddDependencies(...): -want error, +got error:\n%s", tc.reason, diff) + } + + if diff := cmp.Diff(tc.want.confs, len(m.confs)); diff != "" { + t.Errorf("\n%s\naddDependencies(...): -want confs, +got confs:\n%s", tc.reason, diff) + } + if diff := cmp.Diff(tc.want.deps, len(m.deps)); diff != "" { + t.Errorf("\n%s\naddDependencies(...): -want deps, +got deps:\n%s", tc.reason, diff) + } + }) + } +} + +func TestAddDependencies(t *testing.T) { + cd1 := static.NewLayer(configDep1Yaml, types.OCILayer) + cd2 := static.NewLayer(configDep2Yaml, types.OCILayer) + pd1 := static.NewLayer(providerYaml, types.OCILayer) + fd1 := static.NewLayer(funcYaml, types.OCILayer) + + fetchMockFunc := func(image string) (*conregv1.Layer, error) { + switch image { + case "config-dep-1:v1.3.0": + return &cd1, nil + case "config-dep-2:v1.3.0": + return &cd2, nil + case "provider-dep-1:v1.3.0": + return &pd1, nil + case "function-dep-1:v1.3.0": + return &fd1, nil + default: + return nil, fmt.Errorf("unknown image: %s", image) + } + } + + type args struct { + extensions []*unstructured.Unstructured + fetchMock func(image string) (*conregv1.Layer, 
error) + } + type want struct { + confs int + deps int + err error + } + cases := map[string]struct { + reason string + args args + want want + }{ + "SuccessfulDependenciesAddition": { + // config-dep-1 + // └─►config-dep-2 + // ├─►provider-dep-1 + // └─►function-dep-1 + reason: "All dependencies should be successfully fetched and added", + args: args{ + fetchMock: fetchMockFunc, + extensions: []*unstructured.Unstructured{ + { + Object: map[string]interface{}{ + "apiVersion": "pkg.crossplane.io/v1alpha1", + "kind": "Configuration", + "metadata": map[string]interface{}{ + "name": "config-dep-1", + }, + "spec": map[string]interface{}{ + "package": "config-dep-1:v1.3.0", + }, + }, + }, + }, + }, + want: want{ + confs: 2, // 1 Base configuration (config-dep-1), 1 child configuration (config-dep-2) + deps: 4, // 2 configurations (config-dep-1, config-dep-2), 1 provider (provider-dep-1), 1 function (function-dep-1) + err: nil, + }, + }, + } + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + fs := afero.NewMemMapFs() + w := &bytes.Buffer{} + + m := NewManager("", fs, w) + _ = m.PrepExtensions(tc.args.extensions) + + m.fetcher = &MockFetcher{tc.args.fetchMock} + err := m.addDependencies(m.confs) + + if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { + t.Errorf("\n%s\naddDependencies(...): -want error, +got error:\n%s", tc.reason, diff) + } + if diff := cmp.Diff(tc.want.confs, len(m.confs)); diff != "" { + t.Errorf("\n%s\naddDependencies(...): -want confs, +got confs:\n%s", tc.reason, diff) + } + if diff := cmp.Diff(tc.want.deps, len(m.deps)); diff != "" { + t.Errorf("\n%s\naddDependencies(...): -want deps, +got deps:\n%s", tc.reason, diff) + } + }) + } +} + +type MockFetcher struct { + fetch func(image string) (*conregv1.Layer, error) +} + +func (m *MockFetcher) FetchBaseLayer(image string) (*conregv1.Layer, error) { + return m.fetch(image) +} diff --git a/cmd/crank/beta/validate/unknown_fields.go 
b/cmd/crank/beta/validate/unknown_fields.go new file mode 100644 index 000000000..4f918e033 --- /dev/null +++ b/cmd/crank/beta/validate/unknown_fields.go @@ -0,0 +1,42 @@ +/* +Copyright 2024 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validate + +import ( + "fmt" + "strings" + + "k8s.io/apiextensions-apiserver/pkg/apiserver/schema" + "k8s.io/apiextensions-apiserver/pkg/apiserver/schema/pruning" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// validateUnknownFields Validates the resource's unknown fields against the given schema and returns a list of errors. 
+func validateUnknownFields(mr map[string]interface{}, sch *schema.Structural) field.ErrorList { + opts := schema.UnknownFieldPathOptions{ + TrackUnknownFieldPaths: true, // to get the list of pruned unknown fields + } + errs := field.ErrorList{} + + uf := pruning.PruneWithOptions(mr, sch, true, opts) + for _, f := range uf { + strPath := strings.Split(f, ".") + child := strPath[len(strPath)-1] + errs = append(errs, field.Invalid(field.NewPath(f), child, fmt.Sprintf("unknown field: \"%s\"", child))) + } + return errs +} diff --git a/cmd/crank/beta/validate/validate.go b/cmd/crank/beta/validate/validate.go index a8e4d63b6..7da0f6509 100644 --- a/cmd/crank/beta/validate/validate.go +++ b/cmd/crank/beta/validate/validate.go @@ -28,6 +28,7 @@ import ( "k8s.io/apiextensions-apiserver/pkg/apiserver/validation" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" runtimeschema "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" celconfig "k8s.io/apiserver/pkg/apis/cel" "github.com/crossplane/crossplane-runtime/pkg/errors" @@ -90,8 +91,8 @@ func newValidatorsAndStructurals(crds []*extv1.CustomResourceDefinition) (map[ru return validators, structurals, nil } -// SchemaValidation validates the resources against the given CRDs -func SchemaValidation(resources []*unstructured.Unstructured, crds []*extv1.CustomResourceDefinition, skipSuccessLogs bool, w io.Writer) error { //nolint:gocyclo // printing the output increases the cyclomatic complexity a little bit +// SchemaValidation validates the resources against the given CRDs. 
+func SchemaValidation(resources []*unstructured.Unstructured, crds []*extv1.CustomResourceDefinition, skipSuccessLogs bool, w io.Writer) error { //nolint:gocognit // printing the output increases the cyclomatic complexity a little bit schemaValidators, structurals, err := newValidatorsAndStructurals(crds) if err != nil { return errors.Wrap(err, "cannot create schema validators") @@ -102,6 +103,7 @@ func SchemaValidation(resources []*unstructured.Unstructured, crds []*extv1.Cust for i, r := range resources { gvk := r.GetObjectKind().GroupVersionKind() sv, ok := schemaValidators[gvk] + s := structurals[gvk] // if we have a schema validator, we should also have a structural if !ok { missingSchemas++ if _, err := fmt.Fprintf(w, "[!] could not find CRD/XRD for: %s\n", r.GroupVersionKind().String()); err != nil { @@ -112,9 +114,10 @@ func SchemaValidation(resources []*unstructured.Unstructured, crds []*extv1.Cust } rf := 0 - + re := field.ErrorList{} for _, v := range sv { - re := validation.ValidateCustomResource(nil, r, *v) + re = append(re, validation.ValidateCustomResource(nil, r, *v)...) + re = append(re, validateUnknownFields(r.UnstructuredContent(), s)...) 
for _, e := range re { rf++ if _, err := fmt.Fprintf(w, "[x] schema validation error %s, %s : %s\n", r.GroupVersionKind().String(), getResourceName(r), e.Error()); err != nil { @@ -122,8 +125,6 @@ func SchemaValidation(resources []*unstructured.Unstructured, crds []*extv1.Cust } } - s := structurals[gvk] // if we have a schema validator, we should also have a structural - celValidator := cel.NewValidator(s, true, celconfig.PerCallLimit) re, _ = celValidator.Validate(context.TODO(), nil, s, resources[i].Object, nil, celconfig.PerCallLimit) for _, e := range re { diff --git a/cmd/crank/beta/validate/validate_test.go b/cmd/crank/beta/validate/validate_test.go index 9b6511a39..7f7ec9f47 100644 --- a/cmd/crank/beta/validate/validate_test.go +++ b/cmd/crank/beta/validate/validate_test.go @@ -22,8 +22,10 @@ import ( "github.com/google/go-cmp/cmp" extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apiextensions-apiserver/pkg/apiserver/schema" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/utils/ptr" "github.com/crossplane/crossplane-runtime/pkg/errors" @@ -526,6 +528,15 @@ func TestConvertToCRDs(t *testing.T) { }, }, }, + "claimConditionTypes": { + Type: "array", + XListType: ptr.To("set"), + Items: &extv1.JSONSchemaPropsOrArray{ + Schema: &extv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, "connectionDetails": { Type: "object", Properties: map[string]extv1.JSONSchemaProps{ @@ -869,6 +880,15 @@ func TestConvertToCRDs(t *testing.T) { }, }, }, + "claimConditionTypes": { + Type: "array", + XListType: ptr.To("set"), + Items: &extv1.JSONSchemaPropsOrArray{ + Schema: &extv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, "connectionDetails": { Type: "object", Properties: map[string]extv1.JSONSchemaProps{ @@ -1113,6 +1133,15 @@ func TestConvertToCRDs(t *testing.T) { }, }, }, + "claimConditionTypes": { + Type: "array", + XListType: 
ptr.To("set"), + Items: &extv1.JSONSchemaPropsOrArray{ + Schema: &extv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, "connectionDetails": { Type: "object", Properties: map[string]extv1.JSONSchemaProps{ @@ -1311,3 +1340,89 @@ func TestValidateResources(t *testing.T) { }) } } + +func TestValidateUnknownFields(t *testing.T) { + type args struct { + mr map[string]interface{} + sch *schema.Structural + } + type want struct { + errs field.ErrorList + } + cases := map[string]struct { + reason string + args args + want want + }{ + "UnknownFieldPresent": { + reason: "Should detect unknown fields in the resource and return an error", + args: args{ + mr: map[string]interface{}{ + "apiVersion": "test.org/v1alpha1", + "kind": "Test", + "metadata": map[string]interface{}{ + "name": "test-instance", + }, + "spec": map[string]interface{}{ + "replicas": 3, + "unknownField": "should fail", // This field is not defined in the CRD schema + }, + }, + sch: &schema.Structural{ + Properties: map[string]schema.Structural{ + "spec": { + Properties: map[string]schema.Structural{ + "replicas": { + Generic: schema.Generic{Type: "integer"}, + }, + }, + }, + }, + }, + }, + want: want{ + errs: field.ErrorList{ + field.Invalid(field.NewPath("spec.unknownField"), "unknownField", `unknown field: "unknownField"`), + }, + }, + }, + "UnknownFieldNotPresent": { + reason: "Should not return an error when no unknown fields are present", + args: args{ + mr: map[string]interface{}{ + "apiVersion": "test.org/v1alpha1", + "kind": "Test", + "metadata": map[string]interface{}{ + "name": "test-instance", + }, + "spec": map[string]interface{}{ + "replicas": 3, // No unknown fields + }, + }, + sch: &schema.Structural{ + Properties: map[string]schema.Structural{ + "spec": { + Properties: map[string]schema.Structural{ + "replicas": { + Generic: schema.Generic{Type: "integer"}, + }, + }, + }, + }, + }, + }, + want: want{ + errs: field.ErrorList{}, + }, + }, + } + + for name, tc := range cases { + t.Run(name, 
func(t *testing.T) { + errs := validateUnknownFields(tc.args.mr, tc.args.sch) + if diff := cmp.Diff(tc.want.errs, errs, test.EquateErrors()); diff != "" { + t.Errorf("%s\nvalidateUnknownFields(...): -want errs, +got errs:\n%s", tc.reason, diff) + } + }) + } +} diff --git a/cmd/crank/beta/xpkg/xpkg.go b/cmd/crank/beta/xpkg/xpkg.go deleted file mode 100644 index 52b3897bb..000000000 --- a/cmd/crank/beta/xpkg/xpkg.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright 2023 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package xpkg contains beta Crossplane packaging commands. -package xpkg - -// Cmd contains commands for interacting with packages. -type Cmd struct { - // Keep commands sorted alphabetically. - Init initCmd `cmd:"" help:"Initialize a new package from a template."` -} - -// Help prints out the help for the xpkg command. -func (c *Cmd) Help() string { - return ` -Crossplane can be extended using packages. Crossplane packages are called xpkgs. -Crossplane supports configuration, provider and function packages. - -A package is an opinionated OCI image that contains everything needed to extend -a Crossplane control plane with new functionality. For example installing a -provider package extends Crossplane with support for new kinds of managed -resource (MR). - -See https://docs.crossplane.io/latest/concepts/packages for more information. 
-` -} diff --git a/cmd/crank/main.go b/cmd/crank/main.go index c3aefdb3a..ff4594f24 100644 --- a/cmd/crank/main.go +++ b/cmd/crank/main.go @@ -18,36 +18,22 @@ limitations under the License. package main import ( - "fmt" - "github.com/alecthomas/kong" "sigs.k8s.io/controller-runtime/pkg/log/zap" "github.com/crossplane/crossplane-runtime/pkg/logging" "github.com/crossplane/crossplane/cmd/crank/beta" + "github.com/crossplane/crossplane/cmd/crank/render" + "github.com/crossplane/crossplane/cmd/crank/version" "github.com/crossplane/crossplane/cmd/crank/xpkg" - "github.com/crossplane/crossplane/internal/version" ) -var _ = kong.Must(&cli) - -type versionFlag string -type verboseFlag bool - -// Decode overrides the default string decoder to be a no-op. -func (v versionFlag) Decode(_ *kong.DecodeContext) error { return nil } - -// IsBool indicates that this string flag should be treated as a boolean value. -func (v versionFlag) IsBool() bool { return true } +var _ = kong.Must(&cli{}) -// BeforeApply indicates that we want to execute the logic before running any -// commands. -func (v versionFlag) BeforeApply(app *kong.Kong) error { //nolint:unparam // BeforeApply requires this signature. - fmt.Fprintln(app.Stdout, version.New().GetVersionString()) - app.Exit(0) - return nil -} +type ( + verboseFlag bool +) func (v verboseFlag) BeforeApply(ctx *kong.Context) error { //nolint:unparam // BeforeApply requires this signature. logger := logging.NewLogrLogger(zap.New(zap.UseDevMode(true))) @@ -56,25 +42,26 @@ func (v verboseFlag) BeforeApply(ctx *kong.Context) error { //nolint:unparam // } // The top-level crossplane CLI. -var cli struct { +type cli struct { // Subcommands and flags will appear in the CLI help output in the same // order they're specified here. Keep them in alphabetical order. // Subcommands. 
- XPKG xpkg.Cmd `cmd:"" help:"Manage Crossplane packages."` + XPKG xpkg.Cmd `cmd:"" help:"Manage Crossplane packages."` + Render render.Cmd `cmd:"" help:"Render a composite resource (XR)."` // The alpha and beta subcommands are intentionally in a separate block. We // want them to appear after all other subcommands. - Beta beta.Cmd `cmd:"" help:"Beta commands."` + Beta beta.Cmd `cmd:"" help:"Beta commands."` + Version version.Cmd `cmd:"" help:"Print the client and server version information for the current context."` // Flags. - Verbose verboseFlag `name:"verbose" help:"Print verbose logging statements."` - Version versionFlag `short:"v" name:"version" help:"Print version and quit."` + Verbose verboseFlag `help:"Print verbose logging statements." name:"verbose"` } func main() { logger := logging.NewNopLogger() - ctx := kong.Parse(&cli, + ctx := kong.Parse(&cli{}, kong.Name("crossplane"), kong.Description("A command line tool for interacting with Crossplane."), // Binding a variable to kong context makes it available to all commands diff --git a/cmd/crank/beta/render/cmd.go b/cmd/crank/render/cmd.go similarity index 74% rename from cmd/crank/beta/render/cmd.go rename to cmd/crank/render/cmd.go index 0b93450a1..ee50633bc 100644 --- a/cmd/crank/beta/render/cmd.go +++ b/cmd/crank/render/cmd.go @@ -39,20 +39,20 @@ import ( // Cmd arguments and flags for render subcommand. type Cmd struct { // Arguments. - CompositeResource string `arg:"" type:"existingfile" help:"A YAML file specifying the composite resource (XR) to render."` - Composition string `arg:"" type:"existingfile" help:"A YAML file specifying the Composition to use to render the XR. Must be mode: Pipeline."` - Functions string `arg:"" type:"path" help:"A YAML file or directory of YAML files specifying the Composition Functions to use to render the XR."` + CompositeResource string `arg:"" help:"A YAML file specifying the composite resource (XR) to render." 
type:"existingfile"` + Composition string `arg:"" help:"A YAML file specifying the Composition to use to render the XR. Must be mode: Pipeline." type:"existingfile"` + Functions string `arg:"" help:"A YAML file or directory of YAML files specifying the Composition Functions to use to render the XR." type:"path"` // Flags. Keep them in alphabetical order. - ContextFiles map[string]string `mapsep:"," help:"Comma-separated context key-value pairs to pass to the Function pipeline. Values must be files containing JSON."` - ContextValues map[string]string `mapsep:"," help:"Comma-separated context key-value pairs to pass to the Function pipeline. Values must be JSON. Keys take precedence over --context-files."` - IncludeFunctionResults bool `short:"r" help:"Include informational and warning messages from Functions in the rendered output as resources of kind: Result."` - IncludeFullXR bool `short:"x" help:"Include a direct copy of the input XR's spec and metadata fields in the rendered output."` - ObservedResources string `short:"o" placeholder:"PATH" type:"path" help:"A YAML file or directory of YAML files specifying the observed state of composed resources."` - ExtraResources string `short:"e" placeholder:"PATH" type:"path" help:"A YAML file or directory of YAML files specifying extra resources to pass to the Function pipeline."` - IncludeContext bool `short:"c" help:"Include the context in the rendered output as a resource of kind: Context."` + ContextFiles map[string]string `help:"Comma-separated context key-value pairs to pass to the Function pipeline. Values must be files containing JSON." mapsep:""` + ContextValues map[string]string `help:"Comma-separated context key-value pairs to pass to the Function pipeline. Values must be JSON. Keys take precedence over --context-files." mapsep:""` + IncludeFunctionResults bool `help:"Include informational and warning messages from Functions in the rendered output as resources of kind: Result." 
short:"r"` + IncludeFullXR bool `help:"Include a direct copy of the input XR's spec and metadata fields in the rendered output." short:"x"` + ObservedResources string `help:"A YAML file or directory of YAML files specifying the observed state of composed resources." placeholder:"PATH" short:"o" type:"path"` + ExtraResources string `help:"A YAML file or directory of YAML files specifying extra resources to pass to the Function pipeline." placeholder:"PATH" short:"e" type:"path"` + IncludeContext bool `help:"Include the context in the rendered output as a resource of kind: Context." short:"c"` - Timeout time.Duration `help:"How long to run before timing out." default:"1m"` + Timeout time.Duration `default:"1m" help:"How long to run before timing out."` fs afero.Fs } @@ -96,18 +96,18 @@ to the Docker daemon. Examples: # Simulate creating a new XR. - crossplane beta render xr.yaml composition.yaml functions.yaml + crossplane render xr.yaml composition.yaml functions.yaml # Simulate updating an XR that already exists. - crossplane beta render xr.yaml composition.yaml functions.yaml \ + crossplane render xr.yaml composition.yaml functions.yaml \ --observed-resources=existing-observed-resources.yaml # Pass context values to the Function pipeline. - crossplane beta render xr.yaml composition.yaml functions.yaml \ + crossplane render xr.yaml composition.yaml functions.yaml \ --context-values=apiextensions.crossplane.io/environment='{"key": "value"}' # Pass extra resources Functions in the pipeline can request. - crossplane beta render xr.yaml composition.yaml functions.yaml \ + crossplane render xr.yaml composition.yaml functions.yaml \ --extra-resources=extra-resources.yaml ` } @@ -119,7 +119,7 @@ func (c *Cmd) AfterApply() error { } // Run render. -func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { //nolint:gocyclo // Only a touch over. +func (c *Cmd) Run(k *kong.Context, log logging.Logger) error { //nolint:gocognit // Only a touch over. 
xr, err := LoadCompositeResource(c.fs, c.CompositeResource) if err != nil { return errors.Wrapf(err, "cannot load composite resource from %q", c.CompositeResource) @@ -134,7 +134,7 @@ func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { //nolint:gocyc warns, errs := comp.Validate() for _, warn := range warns { - fmt.Fprintf(k.Stderr, "WARN(composition): %s\n", warn) + _, _ = fmt.Fprintf(k.Stderr, "WARN(composition): %s\n", warn) } if len(errs) > 0 { return errors.Wrapf(errs.ToAggregate(), "invalid Composition %q", comp.GetName()) @@ -180,7 +180,7 @@ func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { //nolint:gocyc ctx, cancel := context.WithTimeout(context.Background(), c.Timeout) defer cancel() - out, err := Render(ctx, logger, Inputs{ + out, err := Render(ctx, log, Inputs{ CompositeResource: xr, Composition: comp, Functions: fns, @@ -221,13 +221,13 @@ func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { //nolint:gocyc } } - fmt.Fprintln(k.Stdout, "---") + _, _ = fmt.Fprintln(k.Stdout, "---") if err := s.Encode(out.CompositeResource, os.Stdout); err != nil { return errors.Wrapf(err, "cannot marshal composite resource %q to YAML", xr.GetName()) } for i := range out.ComposedResources { - fmt.Fprintln(k.Stdout, "---") + _, _ = fmt.Fprintln(k.Stdout, "---") if err := s.Encode(&out.ComposedResources[i], os.Stdout); err != nil { return errors.Wrapf(err, "cannot marshal composed resource %q to YAML", out.ComposedResources[i].GetAnnotations()[AnnotationKeyCompositionResourceName]) } @@ -235,7 +235,7 @@ func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { //nolint:gocyc if c.IncludeFunctionResults { for i := range out.Results { - fmt.Fprintln(k.Stdout, "---") + _, _ = fmt.Fprintln(k.Stdout, "---") if err := s.Encode(&out.Results[i], os.Stdout); err != nil { return errors.Wrap(err, "cannot marshal result to YAML") } @@ -243,7 +243,7 @@ func (c *Cmd) Run(k *kong.Context, logger logging.Logger) error { //nolint:gocyc } if 
c.IncludeContext { - fmt.Fprintln(k.Stdout, "---") + _, _ = fmt.Fprintln(k.Stdout, "---") if err := s.Encode(out.Context, os.Stdout); err != nil { return errors.Wrap(err, "cannot marshal context to YAML") } diff --git a/cmd/crank/beta/render/load.go b/cmd/crank/render/load.go similarity index 95% rename from cmd/crank/beta/render/load.go rename to cmd/crank/render/load.go index 84b13f12c..6a1d11765 100644 --- a/cmd/crank/beta/render/load.go +++ b/cmd/crank/render/load.go @@ -30,6 +30,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" apiextensionsv1 "github.com/crossplane/crossplane/apis/apiextensions/v1" + pkgv1 "github.com/crossplane/crossplane/apis/pkg/v1" pkgv1beta1 "github.com/crossplane/crossplane/apis/pkg/v1beta1" ) @@ -69,20 +70,21 @@ func LoadComposition(fs afero.Fs, file string) (*apiextensionsv1.Composition, er // a directory of manifests instead of a single stream. // LoadFunctions from a stream of YAML manifests. -func LoadFunctions(filesys afero.Fs, file string) ([]pkgv1beta1.Function, error) { +func LoadFunctions(filesys afero.Fs, file string) ([]pkgv1.Function, error) { stream, err := LoadYAMLStream(filesys, file) if err != nil { return nil, errors.Wrap(err, "cannot load YAML stream from file") } - functions := make([]pkgv1beta1.Function, 0, len(stream)) + // TODO(negz): This needs to support v1beta1 functions, too. 
+ functions := make([]pkgv1.Function, 0, len(stream)) for _, y := range stream { - f := &pkgv1beta1.Function{} + f := &pkgv1.Function{} if err := yaml.Unmarshal(y, f); err != nil { return nil, errors.Wrap(err, "cannot parse YAML Function manifest") } switch gvk := f.GroupVersionKind(); gvk { - case pkgv1beta1.FunctionGroupVersionKind: + case pkgv1.FunctionGroupVersionKind, pkgv1beta1.FunctionGroupVersionKind: functions = append(functions, *f) default: return nil, errors.Errorf("not a function: %s/%s", gvk.Kind, f.GetName()) diff --git a/cmd/crank/beta/render/load_test.go b/cmd/crank/render/load_test.go similarity index 96% rename from cmd/crank/beta/render/load_test.go rename to cmd/crank/render/load_test.go index f64cf7b22..e1a29738e 100644 --- a/cmd/crank/beta/render/load_test.go +++ b/cmd/crank/render/load_test.go @@ -37,10 +37,8 @@ import ( pkgv1beta1 "github.com/crossplane/crossplane/apis/pkg/v1beta1" ) -var ( - //go:embed testdata - testdatafs embed.FS -) +//go:embed testdata +var testdatafs embed.FS func TestLoadCompositeResource(t *testing.T) { fs := afero.FromIOFS{FS: testdatafs} @@ -162,7 +160,7 @@ func TestLoadFunctions(t *testing.T) { fs := afero.FromIOFS{FS: testdatafs} type want struct { - fns []pkgv1beta1.Function + fns []pkgv1.Function err error } cases := map[string]struct { @@ -172,11 +170,11 @@ func TestLoadFunctions(t *testing.T) { "Success": { file: "testdata/functions.yaml", want: want{ - fns: []pkgv1beta1.Function{ + fns: []pkgv1.Function{ { TypeMeta: metav1.TypeMeta{ - Kind: pkgv1beta1.FunctionKind, - APIVersion: pkgv1beta1.SchemeGroupVersion.String(), + Kind: pkgv1.FunctionKind, + APIVersion: pkgv1.SchemeGroupVersion.String(), }, ObjectMeta: metav1.ObjectMeta{ Name: "function-auto-ready", @@ -185,7 +183,7 @@ func TestLoadFunctions(t *testing.T) { AnnotationKeyRuntimeDockerCleanup: string(AnnotationValueRuntimeDockerCleanupOrphan), }, }, - Spec: pkgv1beta1.FunctionSpec{ + Spec: pkgv1.FunctionSpec{ PackageSpec: pkgv1.PackageSpec{ Package: 
"xpkg.upbound.io/crossplane-contrib/function-auto-ready:v0.1.2", }, @@ -203,7 +201,7 @@ func TestLoadFunctions(t *testing.T) { AnnotationKeyRuntimeDevelopmentTarget: "localhost:9444", }, }, - Spec: pkgv1beta1.FunctionSpec{ + Spec: pkgv1.FunctionSpec{ PackageSpec: pkgv1.PackageSpec{ Package: "xpkg.upbound.io/crossplane-contrib/function-dummy:v0.2.1", }, @@ -411,16 +409,17 @@ func TestLoadYAMLStream(t *testing.T) { "Success": { args: args{ file: "testdata/observed.yaml", - fs: afero.FromIOFS{FS: fstest.MapFS{ - "testdata/observed.yaml": &fstest.MapFile{ - Data: []byte(`--- + fs: afero.FromIOFS{ + FS: fstest.MapFS{ + "testdata/observed.yaml": &fstest.MapFile{ + Data: []byte(`--- test: "test" --- test: "test2" `), + }, }, }, - }, }, want: want{ out: [][]byte{ diff --git a/cmd/crank/beta/render/render.go b/cmd/crank/render/render.go similarity index 71% rename from cmd/crank/beta/render/render.go rename to cmd/crank/render/render.go index f553a3ab3..9e8accda3 100644 --- a/cmd/crank/beta/render/render.go +++ b/cmd/crank/render/render.go @@ -20,8 +20,8 @@ import ( "context" "encoding/json" "fmt" - "reflect" "sort" + "sync" "time" "google.golang.org/grpc" @@ -39,10 +39,11 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composed" ucomposite "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" - fnv1beta1 "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1beta1" + fnv1 "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1" apiextensionsv1 "github.com/crossplane/crossplane/apis/apiextensions/v1" - pkgv1beta1 "github.com/crossplane/crossplane/apis/pkg/v1beta1" + pkgv1 "github.com/crossplane/crossplane/apis/pkg/v1" "github.com/crossplane/crossplane/internal/controller/apiextensions/composite" + "github.com/crossplane/crossplane/internal/xfn" ) // Wait for the server to be ready before sending RPCs. 
Notably this gives @@ -50,7 +51,7 @@ import ( // https://grpc.io/docs/guides/wait-for-ready/ const waitForReady = `{ "methodConfig":[{ - "name": [{"service": "apiextensions.fn.proto.v1beta1.FunctionRunnerService"}], + "name": [{}], "waitForReady": true }] }` @@ -67,7 +68,7 @@ const ( type Inputs struct { CompositeResource *ucomposite.Unstructured Composition *apiextensionsv1.Composition - Functions []pkgv1beta1.Function + Functions []pkgv1.Function ObservedResources []composed.Unstructured ExtraResources []unstructured.Unstructured Context map[string][]byte @@ -88,35 +89,91 @@ type Outputs struct { // are in use? } -// Render the desired XR and composed resources, sorted by resource name, given the supplied inputs. -func Render(ctx context.Context, logger logging.Logger, in Inputs) (Outputs, error) { //nolint:gocyclo // TODO(negz): Should we refactor to break this up a bit? - // Run our Functions. +// A RuntimeFunctionRunner is a composite.FunctionRunner that runs functions +// locally, using the runtime configured in their annotations (e.g. Docker). +type RuntimeFunctionRunner struct { + contexts map[string]RuntimeContext + conns map[string]*grpc.ClientConn + mx sync.Mutex +} + +// NewRuntimeFunctionRunner returns a FunctionRunner that runs functions +// locally, using the runtime configured in their annotations (e.g. Docker). It +// starts all the functions and creates gRPC connections when called. 
+func NewRuntimeFunctionRunner(ctx context.Context, log logging.Logger, fns []pkgv1.Function) (*RuntimeFunctionRunner, error) { + contexts := map[string]RuntimeContext{} conns := map[string]*grpc.ClientConn{} - for _, fn := range in.Functions { - runtime, err := GetRuntime(fn) + + for _, fn := range fns { + runtime, err := GetRuntime(fn, log) if err != nil { - return Outputs{}, errors.Wrapf(err, "cannot get runtime for Function %q", fn.GetName()) + return nil, errors.Wrapf(err, "cannot get runtime for Function %q", fn.GetName()) } - rctx, err := runtime.Start(ctx, logger) + rctx, err := runtime.Start(ctx) if err != nil { - return Outputs{}, errors.Wrapf(err, "cannot start Function %q", fn.GetName()) + return nil, errors.Wrapf(err, "cannot start Function %q", fn.GetName()) } - defer func() { - if err := rctx.Stop(ctx); err != nil { - logger.Debug("Error stopping function runtime", "function", fn.GetName(), "error", err) - } - }() + contexts[fn.GetName()] = rctx conn, err := grpc.DialContext(ctx, rctx.Target, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultServiceConfig(waitForReady)) if err != nil { - return Outputs{}, errors.Wrapf(err, "cannot dial Function %q at address %q", fn.GetName(), rctx.Target) + return nil, errors.Wrapf(err, "cannot dial Function %q at address %q", fn.GetName(), rctx.Target) } - defer conn.Close() //nolint:errcheck // This only returns an error if the connection is already closed or closing. conns[fn.GetName()] = conn } + return &RuntimeFunctionRunner{conns: conns}, nil +} + +// RunFunction runs the named function. 
+func (r *RuntimeFunctionRunner) RunFunction(ctx context.Context, name string, req *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) { + r.mx.Lock() + defer r.mx.Unlock() + + conn, ok := r.conns[name] + if !ok { + return nil, errors.Errorf("unknown Function %q - does it exist in your Functions file?", name) + } + + return xfn.NewBetaFallBackFunctionRunnerServiceClient(conn).RunFunction(ctx, req) +} + +// Stop all of the runner's runtimes, and close its gRPC connections. +func (r *RuntimeFunctionRunner) Stop(ctx context.Context) error { + r.mx.Lock() + defer r.mx.Unlock() + + for name, conn := range r.conns { + _ = conn.Close() + delete(r.conns, name) + } + for name, rctx := range r.contexts { + if err := rctx.Stop(ctx); err != nil { + return errors.Wrapf(err, "cannot stop function %q runtime (target %q)", name, rctx.Target) + } + delete(r.contexts, name) + } + + return nil +} + +// Render the desired XR and composed resources, sorted by resource name, given the supplied inputs. +func Render(ctx context.Context, log logging.Logger, in Inputs) (Outputs, error) { //nolint:gocognit // TODO(negz): Should we refactor to break this up a bit? + runtimes, err := NewRuntimeFunctionRunner(ctx, log, in.Functions) + if err != nil { + return Outputs{}, errors.Wrap(err, "cannot start function runtimes") + } + + defer func() { + if err := runtimes.Stop(ctx); err != nil { + log.Info("Error stopping function runtimes", "error", err) + } + }() + + runner := composite.NewFetchingFunctionRunner(runtimes, &FilteringFetcher{extra: in.ExtraResources}) + observed := composite.ComposedResourceStates{} for i, cd := range in.ObservedResources { name := cd.GetAnnotations()[AnnotationKeyCompositionResourceName] @@ -135,7 +192,7 @@ func Render(ctx context.Context, logger logging.Logger, in Inputs) (Outputs, err } // The Function pipeline starts with empty desired state. 
- d := &fnv1beta1.State{} + d := &fnv1.State{} results := make([]unstructured.Unstructured, 0) @@ -159,15 +216,8 @@ func Render(ctx context.Context, logger logging.Logger, in Inputs) (Outputs, err // the desired state returned by the last, and each Function may produce // results. for _, fn := range in.Composition.Spec.Pipeline { - conn, ok := conns[fn.FunctionRef.Name] - if !ok { - return Outputs{}, errors.Errorf("unknown Function %q, referenced by pipeline step %q - does it exist in your Functions file?", fn.FunctionRef.Name, fn.Step) - } - - fClient := fnv1beta1.NewFunctionRunnerServiceClient(conn) - // The request to send to the function, will be updated at each iteration if needed. - req := &fnv1beta1.RunFunctionRequest{Observed: o, Desired: d, Context: fctx} + req := &fnv1.RunFunctionRequest{Observed: o, Desired: d, Context: fctx} if fn.Input != nil { in := &structpb.Struct{} @@ -177,47 +227,9 @@ func Render(ctx context.Context, logger logging.Logger, in Inputs) (Outputs, err req.Input = in } - // Used to store the requirements returned at the previous iteration. - var requirements *fnv1beta1.Requirements - // Used to store the response of the function at the previous iteration. - var rsp *fnv1beta1.RunFunctionResponse - - for i := int64(0); i <= composite.MaxRequirementsIterations; i++ { - if i == composite.MaxRequirementsIterations { - // The requirements didn't stabilize after the maximum number of iterations. - return Outputs{}, errors.Errorf("requirements didn't stabilize after the maximum number of iterations (%d)", composite.MaxRequirementsIterations) - } - - rsp, err = fClient.RunFunction(ctx, req) - if err != nil { - return Outputs{}, errors.Wrapf(err, "cannot run pipeline step %q", fn.Step) - } - - newRequirements := rsp.GetRequirements() - if reflect.DeepEqual(newRequirements, requirements) { - // The requirements are stable, the function is done. - break - } - - // Store the requirements for the next iteration. 
- requirements = newRequirements - - // Cleanup the extra resources from the previous iteration to store the new ones - req.ExtraResources = make(map[string]*fnv1beta1.Resources) - - // Fetch the requested resources and add them to the desired state. - for name, selector := range newRequirements.GetExtraResources() { - newExtraResources, err := filterExtraResources(in.ExtraResources, selector) - if err != nil { - return Outputs{}, errors.Wrapf(err, "cannot filter extra resources for pipeline step %q", fn.Step) - } - - // Resources would be nil in case of not found resources. - req.ExtraResources[name] = newExtraResources - } - - // Pass down the updated context across iterations. - req.Context = rsp.GetContext() + rsp, err := runner.RunFunction(ctx, fn.FunctionRef.Name, req) + if err != nil { + return Outputs{}, errors.Wrapf(err, "cannot run pipeline step %q", fn.Step) } // Pass the desired state returned by this Function to the next one. @@ -230,7 +242,7 @@ func Render(ctx context.Context, logger logging.Logger, in Inputs) (Outputs, err // Results of fatal severity stop the Composition process. for _, rs := range rsp.GetResults() { switch rs.GetSeverity() { //nolint:exhaustive // We intentionally have a broad default case. 
- case fnv1beta1.Severity_SEVERITY_FATAL: + case fnv1.Severity_SEVERITY_FATAL: return Outputs{}, errors.Errorf("pipeline step %q returned a fatal result: %s", fn.Step, rs.GetMessage()) default: results = append(results, unstructured.Unstructured{Object: map[string]any{ @@ -247,7 +259,7 @@ func Render(ctx context.Context, logger logging.Logger, in Inputs) (Outputs, err desired := make([]composed.Unstructured, 0, len(d.GetResources())) var unready []string for name, dr := range d.GetResources() { - if dr.GetReady() != fnv1beta1.Ready_READY_TRUE { + if dr.GetReady() != fnv1.Ready_READY_TRUE { unready = append(unready, name) } @@ -329,34 +341,41 @@ func SetComposedResourceMetadata(cd resource.Object, xr resource.Composite, name return errors.Wrapf(meta.AddControllerReference(cd, or), "cannot set composite resource %q as controller ref of composed resource", xr.GetName()) } -func filterExtraResources(ers []unstructured.Unstructured, selector *fnv1beta1.ResourceSelector) (*fnv1beta1.Resources, error) { //nolint:gocyclo // There is not much to simplify here. - if len(ers) == 0 || selector == nil { +// FilteringFetcher is a composite.ExtraResourcesFetcher that "fetches" any +// supplied resource that matches a resource selector. +type FilteringFetcher struct { + extra []unstructured.Unstructured +} + +// Fetch returns all of the underlying extra resources that match the supplied +// resource selector. 
+func (f *FilteringFetcher) Fetch(_ context.Context, rs *fnv1.ResourceSelector) (*fnv1.Resources, error) { + if len(f.extra) == 0 || rs == nil { return nil, nil } - out := &fnv1beta1.Resources{} - for _, er := range ers { - er := er - if selector.GetApiVersion() != er.GetAPIVersion() { + out := &fnv1.Resources{} + for _, er := range f.extra { + if rs.GetApiVersion() != er.GetAPIVersion() { continue } - if selector.GetKind() != er.GetKind() { + if rs.GetKind() != er.GetKind() { continue } - if selector.GetMatchName() == er.GetName() { + if rs.GetMatchName() == er.GetName() { o, err := composite.AsStruct(&er) if err != nil { return nil, errors.Wrapf(err, "cannot marshal extra resource %q", er.GetName()) } - out.Items = []*fnv1beta1.Resource{{Resource: o}} + out.Items = []*fnv1.Resource{{Resource: o}} return out, nil } - if selector.GetMatchLabels() != nil { - if labels.SelectorFromSet(selector.GetMatchLabels().GetLabels()).Matches(labels.Set(er.GetLabels())) { + if rs.GetMatchLabels() != nil { + if labels.SelectorFromSet(rs.GetMatchLabels().GetLabels()).Matches(labels.Set(er.GetLabels())) { o, err := composite.AsStruct(&er) if err != nil { return nil, errors.Wrapf(err, "cannot marshal extra resource %q", er.GetName()) } - out.Items = append(out.GetItems(), &fnv1beta1.Resource{Resource: o}) + out.Items = append(out.GetItems(), &fnv1.Resource{Resource: o}) } } } diff --git a/cmd/crank/beta/render/render_test.go b/cmd/crank/render/render_test.go similarity index 85% rename from cmd/crank/beta/render/render_test.go rename to cmd/crank/render/render_test.go index 72150e227..4f0600697 100644 --- a/cmd/crank/beta/render/render_test.go +++ b/cmd/crank/render/render_test.go @@ -34,11 +34,17 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/logging" "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composed" - "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" + ucomposite 
"github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" - fnv1beta1 "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1beta1" + fnv1 "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1" apiextensionsv1 "github.com/crossplane/crossplane/apis/apiextensions/v1" - pkgv1beta1 "github.com/crossplane/crossplane/apis/pkg/v1beta1" + pkgv1 "github.com/crossplane/crossplane/apis/pkg/v1" + "github.com/crossplane/crossplane/internal/controller/apiextensions/composite" +) + +var ( + _ composite.FunctionRunner = &RuntimeFunctionRunner{} + _ composite.ExtraResourcesFetcher = &FilteringFetcher{} ) func TestRender(t *testing.T) { @@ -58,14 +64,14 @@ func TestRender(t *testing.T) { cases := map[string]struct { reason string - rsp *fnv1beta1.RunFunctionResponse + rsp *fnv1.RunFunctionResponse args args want want }{ "InvalidContextValue": { args: args{ in: Inputs{ - CompositeResource: composite.New(), + CompositeResource: ucomposite.New(), Context: map[string][]byte{ "not-valid-json": []byte(`{`), }, @@ -78,7 +84,7 @@ func TestRender(t *testing.T) { "InvalidInput": { args: args{ in: Inputs{ - CompositeResource: composite.New(), + CompositeResource: ucomposite.New(), Composition: &apiextensionsv1.Composition{ Spec: apiextensionsv1.CompositionSpec{ Pipeline: []apiextensionsv1.PipelineStep{ @@ -98,7 +104,7 @@ func TestRender(t *testing.T) { "UnknownRuntime": { args: args{ in: Inputs{ - Functions: []pkgv1beta1.Function{{ + Functions: []pkgv1.Function{{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ AnnotationKeyRuntime: "wat", @@ -115,7 +121,7 @@ func TestRender(t *testing.T) { args: args{ ctx: context.Background(), in: Inputs{ - CompositeResource: composite.New(), + CompositeResource: ucomposite.New(), Composition: &apiextensionsv1.Composition{ Spec: apiextensionsv1.CompositionSpec{ Mode: &pipeline, @@ -137,7 +143,7 @@ func TestRender(t *testing.T) { args: args{ ctx: context.Background(), in: Inputs{ - CompositeResource: 
composite.New(), + CompositeResource: ucomposite.New(), Composition: &apiextensionsv1.Composition{ Spec: apiextensionsv1.CompositionSpec{ Mode: &pipeline, @@ -149,19 +155,18 @@ func TestRender(t *testing.T) { }, }, }, - Functions: []pkgv1beta1.Function{ - func() pkgv1beta1.Function { - - lis := NewFunction(t, &fnv1beta1.RunFunctionResponse{ - Results: []*fnv1beta1.Result{ + Functions: []pkgv1.Function{ + func() pkgv1.Function { + lis := NewFunction(t, &fnv1.RunFunctionResponse{ + Results: []*fnv1.Result{ { - Severity: fnv1beta1.Severity_SEVERITY_FATAL, + Severity: fnv1.Severity_SEVERITY_FATAL, }, }, }) listeners = append(listeners, lis) - return pkgv1beta1.Function{ + return pkgv1.Function{ ObjectMeta: metav1.ObjectMeta{ Name: "function-test", Annotations: map[string]string{ @@ -182,7 +187,7 @@ func TestRender(t *testing.T) { args: args{ ctx: context.Background(), in: Inputs{ - CompositeResource: &composite.Unstructured{ + CompositeResource: &ucomposite.Unstructured{ Unstructured: unstructured.Unstructured{ Object: MustLoadJSON(`{ "apiVersion": "nop.example.org/v1alpha1", @@ -204,12 +209,11 @@ func TestRender(t *testing.T) { }, }, }, - Functions: []pkgv1beta1.Function{ - func() pkgv1beta1.Function { - - lis := NewFunction(t, &fnv1beta1.RunFunctionResponse{ - Desired: &fnv1beta1.State{ - Composite: &fnv1beta1.Resource{ + Functions: []pkgv1.Function{ + func() pkgv1.Function { + lis := NewFunction(t, &fnv1.RunFunctionResponse{ + Desired: &fnv1.State{ + Composite: &fnv1.Resource{ Resource: MustStructJSON(`{ "status": { "widgets": 9001, @@ -223,7 +227,7 @@ func TestRender(t *testing.T) { } }`), }, - Resources: map[string]*fnv1beta1.Resource{ + Resources: map[string]*fnv1.Resource{ "b-cool-resource": { Resource: MustStructJSON(`{ "apiVersion": "atest.crossplane.io/v1", @@ -247,7 +251,7 @@ func TestRender(t *testing.T) { }) listeners = append(listeners, lis) - return pkgv1beta1.Function{ + return pkgv1.Function{ ObjectMeta: metav1.ObjectMeta{ Name: "function-test", 
Annotations: map[string]string{ @@ -265,7 +269,7 @@ func TestRender(t *testing.T) { }, want: want{ out: Outputs{ - CompositeResource: &composite.Unstructured{ + CompositeResource: &ucomposite.Unstructured{ Unstructured: unstructured.Unstructured{ Object: MustLoadJSON(`{ "apiVersion": "nop.example.org/v1alpha1", @@ -351,7 +355,7 @@ func TestRender(t *testing.T) { args: args{ ctx: context.Background(), in: Inputs{ - CompositeResource: &composite.Unstructured{ + CompositeResource: &ucomposite.Unstructured{ Unstructured: unstructured.Unstructured{ Object: MustLoadJSON(`{ "apiVersion": "nop.example.org/v1alpha1", @@ -373,12 +377,11 @@ func TestRender(t *testing.T) { }, }, }, - Functions: []pkgv1beta1.Function{ - func() pkgv1beta1.Function { - - lis := NewFunction(t, &fnv1beta1.RunFunctionResponse{ - Desired: &fnv1beta1.State{ - Composite: &fnv1beta1.Resource{ + Functions: []pkgv1.Function{ + func() pkgv1.Function { + lis := NewFunction(t, &fnv1.RunFunctionResponse{ + Desired: &fnv1.State{ + Composite: &fnv1.Resource{ Resource: MustStructJSON(`{ "status": { "widgets": 9001, @@ -392,7 +395,7 @@ func TestRender(t *testing.T) { } }`), }, - Resources: map[string]*fnv1beta1.Resource{ + Resources: map[string]*fnv1.Resource{ "b-cool-resource": { Resource: MustStructJSON(`{ "apiVersion": "atest.crossplane.io/v1", @@ -401,7 +404,7 @@ func TestRender(t *testing.T) { "widgets": 9003 } }`), - Ready: fnv1beta1.Ready_READY_TRUE, + Ready: fnv1.Ready_READY_TRUE, }, "a-cool-resource": { Resource: MustStructJSON(`{ @@ -411,14 +414,14 @@ func TestRender(t *testing.T) { "widgets": 9002 } }`), - Ready: fnv1beta1.Ready_READY_TRUE, + Ready: fnv1.Ready_READY_TRUE, }, }, }, }) listeners = append(listeners, lis) - return pkgv1beta1.Function{ + return pkgv1.Function{ ObjectMeta: metav1.ObjectMeta{ Name: "function-test", Annotations: map[string]string{ @@ -436,7 +439,7 @@ func TestRender(t *testing.T) { }, want: want{ out: Outputs{ - CompositeResource: &composite.Unstructured{ + CompositeResource: 
&ucomposite.Unstructured{ Unstructured: unstructured.Unstructured{ Object: MustLoadJSON(`{ "apiVersion": "nop.example.org/v1alpha1", @@ -521,7 +524,7 @@ func TestRender(t *testing.T) { args: args{ ctx: context.Background(), in: Inputs{ - CompositeResource: &composite.Unstructured{ + CompositeResource: &ucomposite.Unstructured{ Unstructured: unstructured.Unstructured{ Object: MustLoadJSON(`{ "apiVersion": "nop.example.org/v1alpha1", @@ -543,20 +546,20 @@ func TestRender(t *testing.T) { }, }, }, - Functions: []pkgv1beta1.Function{ - func() pkgv1beta1.Function { + Functions: []pkgv1.Function{ + func() pkgv1.Function { i := 0 - lis := NewFunctionWithRunFunc(t, func(ctx context.Context, request *fnv1beta1.RunFunctionRequest) (*fnv1beta1.RunFunctionResponse, error) { + lis := NewFunctionWithRunFunc(t, func(_ context.Context, request *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) { defer func() { i++ }() switch i { case 0: - return &fnv1beta1.RunFunctionResponse{ - Requirements: &fnv1beta1.Requirements{ - ExtraResources: map[string]*fnv1beta1.ResourceSelector{ + return &fnv1.RunFunctionResponse{ + Requirements: &fnv1.Requirements{ + ExtraResources: map[string]*fnv1.ResourceSelector{ "extra-resource-by-name": { ApiVersion: "test.crossplane.io/v1", Kind: "Foo", - Match: &fnv1beta1.ResourceSelector_MatchName{ + Match: &fnv1.ResourceSelector_MatchName{ MatchName: "extra-resource", }, }, @@ -572,27 +575,27 @@ func TestRender(t *testing.T) { t.Fatalf("expected extra resource to be passed to function on second call") } foo := (res.GetItems()[0].GetResource().AsMap()["spec"].(map[string]interface{}))["foo"].(string) - return &fnv1beta1.RunFunctionResponse{ - Requirements: &fnv1beta1.Requirements{ - ExtraResources: map[string]*fnv1beta1.ResourceSelector{ + return &fnv1.RunFunctionResponse{ + Requirements: &fnv1.Requirements{ + ExtraResources: map[string]*fnv1.ResourceSelector{ "extra-resource-by-name": { ApiVersion: "test.crossplane.io/v1", Kind: "Foo", - Match: 
&fnv1beta1.ResourceSelector_MatchName{ + Match: &fnv1.ResourceSelector_MatchName{ MatchName: "extra-resource", }, }, }, }, - Desired: &fnv1beta1.State{ - Composite: &fnv1beta1.Resource{ + Desired: &fnv1.State{ + Composite: &fnv1.Resource{ Resource: MustStructJSON(`{ "status": { "widgets": "` + foo + `" } }`), }, - Resources: map[string]*fnv1beta1.Resource{ + Resources: map[string]*fnv1.Resource{ "b-cool-resource": { Resource: MustStructJSON(`{ "apiVersion": "atest.crossplane.io/v1", @@ -621,7 +624,7 @@ func TestRender(t *testing.T) { }) listeners = append(listeners, lis) - return pkgv1beta1.Function{ + return pkgv1.Function{ ObjectMeta: metav1.ObjectMeta{ Name: "function-test", Annotations: map[string]string{ @@ -653,7 +656,7 @@ func TestRender(t *testing.T) { }, want: want{ out: Outputs{ - CompositeResource: &composite.Unstructured{ + CompositeResource: &ucomposite.Unstructured{ Unstructured: unstructured.Unstructured{ Object: MustLoadJSON(`{ "apiVersion": "nop.example.org/v1alpha1", @@ -739,7 +742,6 @@ func TestRender(t *testing.T) { for name, tc := range cases { t.Run(name, func(t *testing.T) { - out, err := Render(tc.args.ctx, logging.NewNopLogger(), tc.args.in) if diff := cmp.Diff(tc.want.out, out, cmpopts.EquateEmpty()); diff != "" { @@ -756,41 +758,45 @@ func TestRender(t *testing.T) { } } -func NewFunction(t *testing.T, rsp *fnv1beta1.RunFunctionResponse) net.Listener { +func NewFunction(t *testing.T, rsp *fnv1.RunFunctionResponse) net.Listener { + t.Helper() + lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatal(err) } srv := grpc.NewServer(grpc.Creds(insecure.NewCredentials())) - fnv1beta1.RegisterFunctionRunnerServiceServer(srv, &MockFunctionRunner{Response: rsp}) + fnv1.RegisterFunctionRunnerServiceServer(srv, &MockFunctionRunner{Response: rsp}) go srv.Serve(lis) // This will stop when lis is closed. 
return lis } -func NewFunctionWithRunFunc(t *testing.T, runFunc func(context.Context, *fnv1beta1.RunFunctionRequest) (*fnv1beta1.RunFunctionResponse, error)) net.Listener { +func NewFunctionWithRunFunc(t *testing.T, runFunc func(context.Context, *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error)) net.Listener { + t.Helper() + lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatal(err) } srv := grpc.NewServer(grpc.Creds(insecure.NewCredentials())) - fnv1beta1.RegisterFunctionRunnerServiceServer(srv, &MockFunctionRunner{RunFunc: runFunc}) + fnv1.RegisterFunctionRunnerServiceServer(srv, &MockFunctionRunner{RunFunc: runFunc}) go srv.Serve(lis) // This will stop when lis is closed. return lis } type MockFunctionRunner struct { - fnv1beta1.UnimplementedFunctionRunnerServiceServer + fnv1.UnimplementedFunctionRunnerServiceServer - Response *fnv1beta1.RunFunctionResponse - RunFunc func(context.Context, *fnv1beta1.RunFunctionRequest) (*fnv1beta1.RunFunctionResponse, error) + Response *fnv1.RunFunctionResponse + RunFunc func(context.Context, *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) Error error } -func (r *MockFunctionRunner) RunFunction(ctx context.Context, req *fnv1beta1.RunFunctionRequest) (*fnv1beta1.RunFunctionResponse, error) { +func (r *MockFunctionRunner) RunFunction(ctx context.Context, req *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) { if r.Response != nil { return r.Response, r.Error } @@ -798,28 +804,34 @@ func (r *MockFunctionRunner) RunFunction(ctx context.Context, req *fnv1beta1.Run } func TestFilterExtraResources(t *testing.T) { + type params struct { + ers []unstructured.Unstructured + } type args struct { - ers []unstructured.Unstructured - selector *fnv1beta1.ResourceSelector + ctx context.Context + selector *fnv1.ResourceSelector } type want struct { - out *fnv1beta1.Resources + out *fnv1.Resources err error } cases := map[string]struct { reason string + params params args args want want }{ 
"NilResources": { reason: "Should return empty slice if no extra resources are passed", - args: args{ + params: params{ ers: []unstructured.Unstructured{}, - selector: &fnv1beta1.ResourceSelector{ + }, + args: args{ + selector: &fnv1.ResourceSelector{ ApiVersion: "test.crossplane.io/v1", Kind: "Foo", - Match: &fnv1beta1.ResourceSelector_MatchName{ + Match: &fnv1.ResourceSelector_MatchName{ MatchName: "extra-resource", }, }, @@ -831,7 +843,7 @@ func TestFilterExtraResources(t *testing.T) { }, "NilSelector": { reason: "Should return empty slice if no selector is passed", - args: args{ + params: params{ ers: []unstructured.Unstructured{ { Object: MustLoadJSON(`{ @@ -843,6 +855,8 @@ func TestFilterExtraResources(t *testing.T) { }`), }, }, + }, + args: args{ selector: nil, }, want: want{ @@ -852,7 +866,7 @@ func TestFilterExtraResources(t *testing.T) { }, "MatchName": { reason: "Should return slice with matching resource for name selector", - args: args{ + params: params{ ers: []unstructured.Unstructured{ { Object: MustLoadJSON(`{ @@ -882,17 +896,19 @@ func TestFilterExtraResources(t *testing.T) { }`), }, }, - selector: &fnv1beta1.ResourceSelector{ + }, + args: args{ + selector: &fnv1.ResourceSelector{ ApiVersion: "test.crossplane.io/v1", Kind: "Bar", - Match: &fnv1beta1.ResourceSelector_MatchName{ + Match: &fnv1.ResourceSelector_MatchName{ MatchName: "extra-resource-right", }, }, }, want: want{ - out: &fnv1beta1.Resources{ - Items: []*fnv1beta1.Resource{ + out: &fnv1.Resources{ + Items: []*fnv1.Resource{ { Resource: MustStructJSON(`{ "apiVersion": "test.crossplane.io/v1", @@ -909,7 +925,7 @@ func TestFilterExtraResources(t *testing.T) { }, "MatchLabels": { reason: "Should return slice with matching resources for matching selector", - args: args{ + params: params{ ers: []unstructured.Unstructured{ { Object: MustLoadJSON(`{ @@ -969,11 +985,13 @@ func TestFilterExtraResources(t *testing.T) { }`), }, }, - selector: &fnv1beta1.ResourceSelector{ + }, + args: args{ + 
selector: &fnv1.ResourceSelector{ ApiVersion: "test.crossplane.io/v1", Kind: "Bar", - Match: &fnv1beta1.ResourceSelector_MatchLabels{ - MatchLabels: &fnv1beta1.MatchLabels{ + Match: &fnv1.ResourceSelector_MatchLabels{ + MatchLabels: &fnv1.MatchLabels{ Labels: map[string]string{ "right": "true", }, @@ -982,8 +1000,8 @@ func TestFilterExtraResources(t *testing.T) { }, }, want: want{ - out: &fnv1beta1.Resources{ - Items: []*fnv1beta1.Resource{ + out: &fnv1.Resources{ + Items: []*fnv1.Resource{ { Resource: MustStructJSON(`{ "apiVersion": "test.crossplane.io/v1", @@ -1016,8 +1034,9 @@ func TestFilterExtraResources(t *testing.T) { } for name, tc := range cases { t.Run(name, func(t *testing.T) { - out, err := filterExtraResources(tc.args.ers, tc.args.selector) - if diff := cmp.Diff(tc.want.out, out, cmpopts.EquateEmpty(), cmpopts.IgnoreUnexported(fnv1beta1.Resources{}, fnv1beta1.Resource{}, structpb.Struct{}, structpb.Value{})); diff != "" { + f := &FilteringFetcher{extra: tc.params.ers} + out, err := f.Fetch(tc.args.ctx, tc.args.selector) + if diff := cmp.Diff(tc.want.out, out, cmpopts.EquateEmpty(), cmpopts.IgnoreUnexported(fnv1.Resources{}, fnv1.Resource{}, structpb.Struct{}, structpb.Value{})); diff != "" { t.Errorf("%s\nfilterExtraResources(...): -want, +got:\n%s", tc.reason, diff) } if diff := cmp.Diff(tc.want.err, err, cmpopts.EquateErrors()); diff != "" { diff --git a/cmd/crank/beta/render/runtime.go b/cmd/crank/render/runtime.go similarity index 89% rename from cmd/crank/beta/render/runtime.go rename to cmd/crank/render/runtime.go index e8f1ac789..609745631 100644 --- a/cmd/crank/beta/render/runtime.go +++ b/cmd/crank/render/runtime.go @@ -22,7 +22,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane-runtime/pkg/logging" - pkgv1beta1 "github.com/crossplane/crossplane/apis/pkg/v1beta1" + pkgv1 "github.com/crossplane/crossplane/apis/pkg/v1" ) // AnnotationKeyRuntime can be added to a Function to control what runtime 
is @@ -50,7 +50,7 @@ const ( // A Runtime runs a Function. type Runtime interface { // Start the Function. - Start(ctx context.Context, logger logging.Logger) (RuntimeContext, error) + Start(ctx context.Context) (RuntimeContext, error) } // RuntimeContext contains context on how a Function is being run. @@ -63,12 +63,12 @@ type RuntimeContext struct { } // GetRuntime for the supplied Function, per its annotations. -func GetRuntime(fn pkgv1beta1.Function) (Runtime, error) { +func GetRuntime(fn pkgv1.Function, log logging.Logger) (Runtime, error) { switch r := RuntimeType(fn.GetAnnotations()[AnnotationKeyRuntime]); r { case AnnotationValueRuntimeDocker, "": - return GetRuntimeDocker(fn) + return GetRuntimeDocker(fn, log) case AnnotationValueRuntimeDevelopment: - return GetRuntimeDevelopment(fn), nil + return GetRuntimeDevelopment(fn, log), nil default: return nil, errors.Errorf("unsupported %q annotation value %q (unknown runtime)", AnnotationKeyRuntime, r) } diff --git a/cmd/crank/beta/render/runtime_development.go b/cmd/crank/render/runtime_development.go similarity index 80% rename from cmd/crank/beta/render/runtime_development.go rename to cmd/crank/render/runtime_development.go index ec1476397..e58f2ef67 100644 --- a/cmd/crank/beta/render/runtime_development.go +++ b/cmd/crank/render/runtime_development.go @@ -21,7 +21,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/logging" - pkgv1beta1 "github.com/crossplane/crossplane/apis/pkg/v1beta1" + pkgv1 "github.com/crossplane/crossplane/apis/pkg/v1" ) // Annotations that can be used to configure the Development runtime. @@ -37,14 +37,18 @@ type RuntimeDevelopment struct { // Target is the gRPC target for the running function, for example // localhost:9443. Target string + // Function is the name of the function to be run. Function string + + // log is the logger for this runtime. + log logging.Logger } // GetRuntimeDevelopment extracts RuntimeDevelopment configuration from the // supplied Function. 
-func GetRuntimeDevelopment(fn pkgv1beta1.Function) *RuntimeDevelopment { - r := &RuntimeDevelopment{Target: "localhost:9443", Function: fn.GetName()} +func GetRuntimeDevelopment(fn pkgv1.Function, log logging.Logger) *RuntimeDevelopment { + r := &RuntimeDevelopment{Target: "localhost:9443", Function: fn.GetName(), log: log} if t := fn.GetAnnotations()[AnnotationKeyRuntimeDevelopmentTarget]; t != "" { r.Target = t } @@ -54,7 +58,7 @@ func GetRuntimeDevelopment(fn pkgv1beta1.Function) *RuntimeDevelopment { var _ Runtime = &RuntimeDevelopment{} // Start does nothing. It returns a Stop function that also does nothing. -func (r *RuntimeDevelopment) Start(_ context.Context, logger logging.Logger) (RuntimeContext, error) { - logger.Debug("Starting development runtime. Remember to run the function manually.", "function", r.Function, "target", r.Target) +func (r *RuntimeDevelopment) Start(_ context.Context) (RuntimeContext, error) { + r.log.Debug("Starting development runtime. Remember to run the function manually.", "function", r.Function, "target", r.Target) return RuntimeContext{Target: r.Target, Stop: func(_ context.Context) error { return nil }}, nil } diff --git a/cmd/crank/beta/render/runtime_docker.go b/cmd/crank/render/runtime_docker.go similarity index 81% rename from cmd/crank/beta/render/runtime_docker.go rename to cmd/crank/render/runtime_docker.go index fde36cd58..92cfee7f8 100644 --- a/cmd/crank/beta/render/runtime_docker.go +++ b/cmd/crank/render/runtime_docker.go @@ -31,7 +31,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane-runtime/pkg/logging" - pkgv1beta1 "github.com/crossplane/crossplane/apis/pkg/v1beta1" + pkgv1 "github.com/crossplane/crossplane/apis/pkg/v1" ) // Annotations that can be used to configure the Docker runtime. @@ -56,11 +56,15 @@ const ( // container once rendering is done. 
AnnotationValueRuntimeDockerCleanupStop DockerCleanup = "Stop" + // AnnotationValueRuntimeDockerCleanupRemove stops and removes the + // container once rendering is done. + AnnotationValueRuntimeDockerCleanupRemove DockerCleanup = "Remove" + // AnnotationValueRuntimeDockerCleanupOrphan leaves the container running // once rendering is done. AnnotationValueRuntimeDockerCleanupOrphan DockerCleanup = "Orphan" - AnnotationValueRuntimeDockerCleanupDefault = AnnotationValueRuntimeDockerCleanupStop + AnnotationValueRuntimeDockerCleanupDefault = AnnotationValueRuntimeDockerCleanupRemove ) // AnnotationKeyRuntimeDockerPullPolicy can be added to a Function to control how its runtime @@ -90,16 +94,19 @@ type RuntimeDocker struct { // Image to run Image string - // Stop container once rendering is done - Stop bool + // Cleanup controls how the containers are handled after rendering. + Cleanup DockerCleanup // PullPolicy controls how the runtime image is pulled. PullPolicy DockerPullPolicy + + // log is the logger for this runtime. + log logging.Logger } // GetDockerPullPolicy extracts PullPolicy configuration from the supplied // Function. -func GetDockerPullPolicy(fn pkgv1beta1.Function) (DockerPullPolicy, error) { +func GetDockerPullPolicy(fn pkgv1.Function) (DockerPullPolicy, error) { switch p := DockerPullPolicy(fn.GetAnnotations()[AnnotationKeyRuntimeDockerPullPolicy]); p { case AnnotationValueRuntimeDockerPullPolicyAlways, AnnotationValueRuntimeDockerPullPolicyNever, AnnotationValueRuntimeDockerPullPolicyIfNotPresent: return p, nil @@ -111,9 +118,9 @@ func GetDockerPullPolicy(fn pkgv1beta1.Function) (DockerPullPolicy, error) { } // GetDockerCleanup extracts Cleanup configuration from the supplied Function. 
-func GetDockerCleanup(fn pkgv1beta1.Function) (DockerCleanup, error) { +func GetDockerCleanup(fn pkgv1.Function) (DockerCleanup, error) { switch c := DockerCleanup(fn.GetAnnotations()[AnnotationKeyRuntimeDockerCleanup]); c { - case AnnotationValueRuntimeDockerCleanupStop, AnnotationValueRuntimeDockerCleanupOrphan: + case AnnotationValueRuntimeDockerCleanupStop, AnnotationValueRuntimeDockerCleanupOrphan, AnnotationValueRuntimeDockerCleanupRemove: return c, nil case "": return AnnotationValueRuntimeDockerCleanupDefault, nil @@ -124,7 +131,7 @@ func GetDockerCleanup(fn pkgv1beta1.Function) (DockerCleanup, error) { // GetRuntimeDocker extracts RuntimeDocker configuration from the supplied // Function. -func GetRuntimeDocker(fn pkgv1beta1.Function) (*RuntimeDocker, error) { +func GetRuntimeDocker(fn pkgv1.Function, log logging.Logger) (*RuntimeDocker, error) { cleanup, err := GetDockerCleanup(fn) if err != nil { return nil, errors.Wrapf(err, "cannot get cleanup policy for Function %q", fn.GetName()) @@ -138,8 +145,9 @@ func GetRuntimeDocker(fn pkgv1beta1.Function) (*RuntimeDocker, error) { } r := &RuntimeDocker{ Image: fn.Spec.Package, - Stop: cleanup == AnnotationValueRuntimeDockerCleanupStop, + Cleanup: cleanup, PullPolicy: pullPolicy, + log: log, } if i := fn.GetAnnotations()[AnnotationKeyRuntimeDockerImage]; i != "" { r.Image = i @@ -150,8 +158,8 @@ func GetRuntimeDocker(fn pkgv1beta1.Function) (*RuntimeDocker, error) { var _ Runtime = &RuntimeDocker{} // Start a Function as a Docker container. -func (r *RuntimeDocker) Start(ctx context.Context, logger logging.Logger) (RuntimeContext, error) { //nolint:gocyclo // TODO(phisco): Refactor to break this up a bit, not so easy. 
- logger.Debug("Starting Docker container runtime", "image", r.Image) +func (r *RuntimeDocker) Start(ctx context.Context) (RuntimeContext, error) { + r.log.Debug("Starting Docker container runtime", "image", r.Image) c, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) if err != nil { return RuntimeContext{}, errors.Wrap(err, "cannot create Docker client using environment variables") @@ -182,7 +190,7 @@ func (r *RuntimeDocker) Start(ctx context.Context, logger logging.Logger) (Runti } if r.PullPolicy == AnnotationValueRuntimeDockerPullPolicyAlways { - logger.Debug("Pulling image with pullPolicy: Always", "image", r.Image) + r.log.Debug("Pulling image with pullPolicy: Always", "image", r.Image) err = PullImage(ctx, c, r.Image) if err != nil { return RuntimeContext{}, errors.Wrapf(err, "cannot pull Docker image %q", r.Image) @@ -190,7 +198,7 @@ func (r *RuntimeDocker) Start(ctx context.Context, logger logging.Logger) (Runti } // TODO(negz): Set a container name? Presumably unique across runs. - logger.Debug("Creating Docker container", "image", r.Image, "address", addr) + r.log.Debug("Creating Docker container", "image", r.Image, "address", addr) rsp, err := c.ContainerCreate(ctx, cfg, hcfg, nil, nil, "") if err != nil { if !errdefs.IsNotFound(err) || r.PullPolicy == AnnotationValueRuntimeDockerPullPolicyNever { @@ -198,7 +206,7 @@ func (r *RuntimeDocker) Start(ctx context.Context, logger logging.Logger) (Runti } // The image was not found, but we're allowed to pull it. 
- logger.Debug("Image not found, pulling", "image", r.Image) + r.log.Debug("Image not found, pulling", "image", r.Image) err = PullImage(ctx, c, r.Image) if err != nil { return RuntimeContext{}, errors.Wrapf(err, "cannot pull Docker image %q", r.Image) @@ -214,15 +222,24 @@ func (r *RuntimeDocker) Start(ctx context.Context, logger logging.Logger) (Runti return RuntimeContext{}, errors.Wrap(err, "cannot start Docker container") } - stop := func(_ context.Context) error { - logger.Debug("Container left running", "container", rsp.ID, "image", r.Image) - return nil - } - if r.Stop { - stop = func(ctx context.Context) error { - err := c.ContainerStop(ctx, rsp.ID, container.StopOptions{}) - return errors.Wrap(err, "cannot stop Docker container") + stop := func(ctx context.Context) error { + switch r.Cleanup { + case AnnotationValueRuntimeDockerCleanupOrphan: + r.log.Debug("Container left running", "container", rsp.ID, "image", r.Image) + return nil + case AnnotationValueRuntimeDockerCleanupStop: + if err := c.ContainerStop(ctx, rsp.ID, container.StopOptions{}); err != nil { + return errors.Wrap(err, "cannot stop Docker container") + } + case AnnotationValueRuntimeDockerCleanupRemove: + if err := c.ContainerStop(ctx, rsp.ID, container.StopOptions{}); err != nil { + return errors.Wrap(err, "cannot stop Docker container") + } + if err := c.ContainerRemove(ctx, rsp.ID, container.RemoveOptions{}); err != nil { + return errors.Wrap(err, "cannot remove Docker container") + } } + return nil } return RuntimeContext{Target: addr, Stop: stop}, nil diff --git a/cmd/crank/beta/render/runtime_docker_test.go b/cmd/crank/render/runtime_docker_test.go similarity index 68% rename from cmd/crank/beta/render/runtime_docker_test.go rename to cmd/crank/render/runtime_docker_test.go index 5646a0d7c..913e5f0d0 100644 --- a/cmd/crank/beta/render/runtime_docker_test.go +++ b/cmd/crank/render/runtime_docker_test.go @@ -26,12 +26,13 @@ import ( "github.com/google/go-cmp/cmp/cmpopts" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" - v1 "github.com/crossplane/crossplane/apis/pkg/v1" - "github.com/crossplane/crossplane/apis/pkg/v1beta1" + "github.com/crossplane/crossplane-runtime/pkg/logging" + + pkgv1 "github.com/crossplane/crossplane/apis/pkg/v1" ) type mockPullClient struct { - MockPullImage func(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) + MockPullImage func(_ context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) } func (m *mockPullClient) ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) { @@ -42,7 +43,7 @@ var _ pullClient = &mockPullClient{} func TestGetRuntimeDocker(t *testing.T) { type args struct { - fn v1beta1.Function + fn pkgv1.Function } type want struct { rd *RuntimeDocker @@ -57,7 +58,7 @@ func TestGetRuntimeDocker(t *testing.T) { "SuccessAllSet": { reason: "should return a RuntimeDocker with all fields set according to the supplied Function's annotations", args: args{ - fn: v1beta1.Function{ + fn: pkgv1.Function{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ AnnotationKeyRuntimeDockerCleanup: string(AnnotationValueRuntimeDockerCleanupOrphan), @@ -65,8 +66,8 @@ func TestGetRuntimeDocker(t *testing.T) { AnnotationKeyRuntimeDockerImage: "test-image-from-annotation", }, }, - Spec: v1beta1.FunctionSpec{ - PackageSpec: v1.PackageSpec{ + Spec: pkgv1.FunctionSpec{ + PackageSpec: pkgv1.PackageSpec{ Package: "test-package", }, }, @@ -75,7 +76,7 @@ func TestGetRuntimeDocker(t *testing.T) { want: want{ rd: &RuntimeDocker{ Image: "test-image-from-annotation", - Stop: false, + Cleanup: AnnotationValueRuntimeDockerCleanupOrphan, PullPolicy: AnnotationValueRuntimeDockerPullPolicyAlways, }, }, @@ -83,12 +84,12 @@ func TestGetRuntimeDocker(t *testing.T) { "SuccessDefaults": { reason: "should return a RuntimeDocker with default fields set if no annotation are set", args: args{ - fn: v1beta1.Function{ + fn: 
pkgv1.Function{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{}, }, - Spec: v1beta1.FunctionSpec{ - PackageSpec: v1.PackageSpec{ + Spec: pkgv1.FunctionSpec{ + PackageSpec: pkgv1.PackageSpec{ Package: "test-package", }, }, @@ -97,7 +98,7 @@ func TestGetRuntimeDocker(t *testing.T) { want: want{ rd: &RuntimeDocker{ Image: "test-package", - Stop: true, + Cleanup: AnnotationValueRuntimeDockerCleanupRemove, PullPolicy: AnnotationValueRuntimeDockerPullPolicyIfNotPresent, }, }, @@ -105,14 +106,14 @@ func TestGetRuntimeDocker(t *testing.T) { "ErrorUnknownAnnotationValueCleanup": { reason: "should return an error if the supplied Function has an unknown cleanup annotation value", args: args{ - fn: v1beta1.Function{ + fn: pkgv1.Function{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ AnnotationKeyRuntimeDockerCleanup: "wrong", }, }, - Spec: v1beta1.FunctionSpec{ - PackageSpec: v1.PackageSpec{ + Spec: pkgv1.FunctionSpec{ + PackageSpec: pkgv1.PackageSpec{ Package: "test-package", }, }, @@ -125,14 +126,14 @@ func TestGetRuntimeDocker(t *testing.T) { "ErrorUnknownAnnotationPullPolicy": { reason: "should return an error if the supplied Function has an unknown pull policy annotation value", args: args{ - fn: v1beta1.Function{ + fn: pkgv1.Function{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ AnnotationKeyRuntimeDockerPullPolicy: "wrong", }, }, - Spec: v1beta1.FunctionSpec{ - PackageSpec: v1.PackageSpec{ + Spec: pkgv1.FunctionSpec{ + PackageSpec: pkgv1.PackageSpec{ Package: "test-package", }, }, @@ -142,11 +143,35 @@ func TestGetRuntimeDocker(t *testing.T) { err: cmpopts.AnyError, }, }, + "AnnotationsCleanupSetToStop": { + reason: "should return a RuntimeDocker with all fields set according to the supplied Function's annotations", + args: args{ + fn: pkgv1.Function{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + AnnotationKeyRuntimeDockerCleanup: string(AnnotationValueRuntimeDockerCleanupStop), + }, + }, + Spec: 
pkgv1.FunctionSpec{ + PackageSpec: pkgv1.PackageSpec{ + Package: "test-package", + }, + }, + }, + }, + want: want{ + rd: &RuntimeDocker{ + Image: "test-package", + Cleanup: AnnotationValueRuntimeDockerCleanupStop, + PullPolicy: AnnotationValueRuntimeDockerPullPolicyIfNotPresent, + }, + }, + }, } for name, tc := range cases { t.Run(name, func(t *testing.T) { - rd, err := GetRuntimeDocker(tc.args.fn) - if diff := cmp.Diff(tc.want.rd, rd); diff != "" { + rd, err := GetRuntimeDocker(tc.args.fn, logging.NewNopLogger()) + if diff := cmp.Diff(tc.want.rd, rd, cmpopts.IgnoreUnexported(RuntimeDocker{})); diff != "" { t.Errorf("\n%s\nGetRuntimeDocker(...): -want, +got:\n%s", tc.reason, diff) } if diff := cmp.Diff(tc.want.err, err, cmpopts.EquateErrors()); diff != "" { diff --git a/cmd/crank/beta/render/testdata/composition.yaml b/cmd/crank/render/testdata/composition.yaml similarity index 100% rename from cmd/crank/beta/render/testdata/composition.yaml rename to cmd/crank/render/testdata/composition.yaml diff --git a/cmd/crank/beta/render/testdata/extra-resources.yaml b/cmd/crank/render/testdata/extra-resources.yaml similarity index 100% rename from cmd/crank/beta/render/testdata/extra-resources.yaml rename to cmd/crank/render/testdata/extra-resources.yaml diff --git a/cmd/crank/beta/render/testdata/functions.yaml b/cmd/crank/render/testdata/functions.yaml similarity index 93% rename from cmd/crank/beta/render/testdata/functions.yaml rename to cmd/crank/render/testdata/functions.yaml index 9e226a407..8af674fb1 100644 --- a/cmd/crank/beta/render/testdata/functions.yaml +++ b/cmd/crank/render/testdata/functions.yaml @@ -1,5 +1,5 @@ --- -apiVersion: pkg.crossplane.io/v1beta1 +apiVersion: pkg.crossplane.io/v1 kind: Function metadata: name: function-auto-ready diff --git a/cmd/crank/beta/render/testdata/observed.yaml b/cmd/crank/render/testdata/observed.yaml similarity index 100% rename from cmd/crank/beta/render/testdata/observed.yaml rename to 
cmd/crank/render/testdata/observed.yaml diff --git a/cmd/crank/beta/render/testdata/xr.yaml b/cmd/crank/render/testdata/xr.yaml similarity index 100% rename from cmd/crank/beta/render/testdata/xr.yaml rename to cmd/crank/render/testdata/xr.yaml diff --git a/cmd/crank/version/fetch.go b/cmd/crank/version/fetch.go new file mode 100644 index 000000000..6e246be6d --- /dev/null +++ b/cmd/crank/version/fetch.go @@ -0,0 +1,87 @@ +/* +Copyright 2023 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package version contains common functions to get versions +package version + +import ( + "context" + "strings" + + "github.com/google/go-containerregistry/pkg/name" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + ctrl "sigs.k8s.io/controller-runtime" + + "github.com/crossplane/crossplane-runtime/pkg/errors" +) + +const ( + errKubeConfig = "failed to get kubeconfig" + errCreateK8sClientset = "could not create the clientset for Kubernetes" + errFetchCrossplaneDeployment = "could not fetch deployments" +) + +// FetchCrossplaneVersion initializes a Kubernetes client and fetches +// and returns the version of the Crossplane deployment. If the version +// does not have a leading 'v', it prepends it. 
+func FetchCrossplaneVersion(ctx context.Context) (string, error) { + var version string + config, err := ctrl.GetConfig() + if err != nil { + return "", errors.Wrap(err, errKubeConfig) + } + + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return "", errors.Wrap(err, errCreateK8sClientset) + } + + deployments, err := clientset.AppsV1().Deployments("").List(ctx, metav1.ListOptions{ + LabelSelector: "app=crossplane", + }) + if err != nil { + return "", errors.Wrap(err, errFetchCrossplaneDeployment) + } + + for _, deployment := range deployments.Items { + v, ok := deployment.Labels["app.kubernetes.io/version"] + if ok { + if !strings.HasPrefix(v, "v") { + version = "v" + v + } + return version, nil + } + + if len(deployment.Spec.Template.Spec.Containers) > 0 { + imageRef := deployment.Spec.Template.Spec.Containers[0].Image + ref, err := name.ParseReference(imageRef) + if err != nil { + return "", errors.Wrap(err, "error parsing image reference") + } + + if tagged, ok := ref.(name.Tag); ok { + imageTag := tagged.TagStr() + if !strings.HasPrefix(imageTag, "v") { + imageTag = "v" + imageTag + } + return imageTag, nil + } + } + } + + return "", errors.New("Crossplane version or image tag not found") +} diff --git a/cmd/crank/version/version.go b/cmd/crank/version/version.go new file mode 100644 index 000000000..13019eb95 --- /dev/null +++ b/cmd/crank/version/version.go @@ -0,0 +1,59 @@ +/* +Copyright 2023 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package version contains version cmd +package version + +import ( + "context" + "fmt" + "time" + + "github.com/alecthomas/kong" + "github.com/pkg/errors" + + "github.com/crossplane/crossplane/internal/version" +) + +const ( + errGetCrossplaneVersion = "unable to get crossplane version" +) + +// Cmd represents the version command. +type Cmd struct { + Client bool `env:"" help:"If true, shows client version only (no server required)."` +} + +// Run runs the version command. +func (c *Cmd) Run(k *kong.Context) error { + _, _ = fmt.Fprintln(k.Stdout, "Client Version: "+version.New().GetVersionString()) + if c.Client { + return nil + } + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + vxp, err := FetchCrossplaneVersion(ctx) + if err != nil { + return errors.Wrap(err, errGetCrossplaneVersion) + } + if vxp != "" { + _, _ = fmt.Fprintln(k.Stdout, "Server Version: "+vxp) + } + + return nil +} diff --git a/cmd/crank/xpkg/build.go b/cmd/crank/xpkg/build.go index 95d728f26..1442b9b42 100644 --- a/cmd/crank/xpkg/build.go +++ b/cmd/crank/xpkg/build.go @@ -94,12 +94,12 @@ func (c *buildCmd) AfterApply() error { // buildCmd builds a crossplane package. type buildCmd struct { // Flags. Keep sorted alphabetically. - EmbedRuntimeImage string `placeholder:"NAME" help:"An OCI image to embed in the package as its runtime." xor:"runtime-image"` - EmbedRuntimeImageTarball string `placeholder:"PATH" type:"existingfile" help:"An OCI image tarball to embed in the package as its runtime." xor:"runtime-image"` - ExamplesRoot string `short:"e" type:"path" help:"A directory of example YAML files to include in the package." default:"./examples"` - Ignore []string `placeholder:"PATH" help:"Comma-separated file paths, specified relative to --package-root, to exclude from the package. Wildcards are supported. 
Directories cannot be excluded."` - PackageFile string `short:"o" type:"path" placeholder:"PATH" help:"The file to write the package to. Defaults to a generated filename in --package-root."` - PackageRoot string `short:"f" type:"existingdir" help:"The directory that contains the package's crossplane.yaml file." default:"."` + EmbedRuntimeImage string `help:"An OCI image to embed in the package as its runtime." placeholder:"NAME" xor:"runtime-image"` + EmbedRuntimeImageTarball string `help:"An OCI image tarball to embed in the package as its runtime." placeholder:"PATH" type:"existingfile" xor:"runtime-image"` + ExamplesRoot string `default:"./examples" help:"A directory of example YAML files to include in the package." short:"e" type:"path"` + Ignore []string `help:"Comma-separated file paths, specified relative to --package-root, to exclude from the package. Wildcards are supported. Directories cannot be excluded." placeholder:"PATH"` + PackageFile string `help:"The file to write the package to. Defaults to a generated filename in --package-root." placeholder:"PATH" short:"o" type:"path"` + PackageRoot string `default:"." help:"The directory that contains the package's crossplane.yaml file." short:"f" type:"existingdir"` // Internal state. These aren't part of the user-exposed CLI structure. fs afero.Fs @@ -148,7 +148,6 @@ func (c *buildCmd) GetRuntimeBaseImageOpts() ([]xpkg.BuildOpt, error) { return []xpkg.BuildOpt{xpkg.WithBase(img)}, nil } return nil, nil - } // GetOutputFileName prepares output file name. 
diff --git a/cmd/crank/beta/xpkg/init.go b/cmd/crank/xpkg/init.go similarity index 88% rename from cmd/crank/beta/xpkg/init.go rename to cmd/crank/xpkg/init.go index 922b0ed09..09a88245d 100644 --- a/cmd/crank/beta/xpkg/init.go +++ b/cmd/crank/xpkg/init.go @@ -47,6 +47,7 @@ func WellKnownTemplates() map[string]string { "provider-template-upjet": "https://github.com/upbound/upjet-provider-template", "function-template-go": "https://github.com/crossplane/function-template-go", "function-template-python": "https://github.com/crossplane/function-template-python", + "configuration-template": "https://github.com/crossplane/configuration-template", } } @@ -55,8 +56,9 @@ type initCmd struct { Name string `arg:"" help:"The name of the new package to initialize."` Template string `arg:"" help:"The template name or URL to use to initialize the new package."` - Directory string `short:"d" default:"." type:"path" help:"The directory to initialize. It must be empty. It will be created if it doesn't exist."` - RunInitScript bool `short:"r" name:"run-init-script" help:"Runs the init.sh script if it exists without prompting"` + Directory string `default:"." help:"The directory to initialize. It must be empty. It will be created if it doesn't exist." short:"d" type:"path"` + RunInitScript bool `help:"Runs the init.sh script if it exists without prompting" name:"run-init-script" short:"r"` + RefName string `help:"The branch or tag to clone from the template repository." name:"ref-name" short:"b"` } func (c *initCmd) Help() string { @@ -82,14 +84,14 @@ script without prompting. Examples: # Initialize a new Go Composition Function named function-example. - crossplane beta xpkg init function-example function-template-go - + crossplane xpkg init function-example function-template-go + # Initialize a new Provider named provider-example from a custom template. 
- crossplane beta xpkg init provider-example https://github.com/crossplane/provider-template-custom + crossplane xpkg init provider-example https://github.com/crossplane/provider-template-custom # Initialize a new Go Composition Function named function-example and run # its init.sh script (if it exists) without prompting the user or displaying its contents. - crossplane beta xpkg init function-example function-template-go --run-init-script + crossplane xpkg init function-example function-template-go --run-init-script ` b := strings.Builder{} @@ -100,13 +102,13 @@ Examples: return fmt.Sprintf(tpl, b.String()) } -func (c *initCmd) Run(k *kong.Context, logger logging.Logger) error { //nolint:gocyclo // file check switch and print error check make it over the top +func (c *initCmd) Run(k *kong.Context, logger logging.Logger) error { f, err := os.Stat(c.Directory) switch { case err == nil && !f.IsDir(): return errors.Errorf("path %s is not a directory", c.Directory) case os.IsNotExist(err): - if err := os.MkdirAll(c.Directory, 0750); err != nil { + if err := os.MkdirAll(c.Directory, 0o750); err != nil { return errors.Wrapf(err, "failed to create directory %s", c.Directory) } logger.Debug("Created directory", "path", c.Directory) @@ -127,8 +129,9 @@ func (c *initCmd) Run(k *kong.Context, logger logging.Logger) error { //nolint:g fs := osfs.New(c.Directory, osfs.WithBoundOS()) r, err := git.Clone(memory.NewStorage(), fs, &git.CloneOptions{ - URL: repoURL, - Depth: 1, + URL: repoURL, + Depth: 1, + ReferenceName: plumbing.ReferenceName(c.RefName), }) if err != nil { return errors.Wrapf(err, "failed to clone repository from %q", repoURL) @@ -225,6 +228,7 @@ func printFile(w io.Writer, path string) error { if err != nil { return errors.Wrapf(err, "failed to open file %s", path) } + defer f.Close() //nolint:errcheck // It's safe to ignore the error because it only do read operation. 
content, err := io.ReadAll(f) if err != nil { return errors.Wrapf(err, "failed to read file %s", path) diff --git a/cmd/crank/beta/xpkg/init_test.go b/cmd/crank/xpkg/init_test.go similarity index 97% rename from cmd/crank/beta/xpkg/init_test.go rename to cmd/crank/xpkg/init_test.go index 032bade05..c905012b6 100644 --- a/cmd/crank/beta/xpkg/init_test.go +++ b/cmd/crank/xpkg/init_test.go @@ -57,7 +57,7 @@ func TestHandleNotes(t *testing.T) { logger := logging.NewNopLogger() dir := t.TempDir() if tc.args.file != "" { - if err := os.WriteFile(filepath.Join(dir, notes), []byte(tc.args.file), 0644); err != nil { + if err := os.WriteFile(filepath.Join(dir, notes), []byte(tc.args.file), 0o644); err != nil { t.Fatalf("writeFile() error = %v", err) } } diff --git a/cmd/crank/xpkg/install.go b/cmd/crank/xpkg/install.go index b09b1d7fd..ce69b9b38 100644 --- a/cmd/crank/xpkg/install.go +++ b/cmd/crank/xpkg/install.go @@ -53,16 +53,16 @@ const ( // installCmd installs a package. type installCmd struct { // Arguments. - Kind string `arg:"" help:"The kind of package to install. One of \"provider\", \"configuration\", or \"function\"." enum:"provider,configuration,function"` + Kind string `arg:"" enum:"provider,configuration,function" help:"The kind of package to install. One of \"provider\", \"configuration\", or \"function\"."` Package string `arg:"" help:"The package to install."` - Name string `arg:"" optional:"" help:"The name of the new package in the Crossplane API. Derived from the package repository and tag by default."` + Name string `arg:"" help:"The name of the new package in the Crossplane API. Derived from the package repository and tag by default." optional:""` // Flags. Keep sorted alphabetically. 
- RuntimeConfig string `placeholder:"NAME" help:"Install the package with a runtime configuration (for example a DeploymentRuntimeConfig)."` - ManualActivation bool `short:"m" help:"Require the new package's first revision to be manually activated."` - PackagePullSecrets []string `placeholder:"NAME" help:"A comma-separated list of secrets the package manager should use to pull the package from the registry."` - RevisionHistoryLimit int64 `short:"r" placeholder:"LIMIT" help:"How many package revisions may exist before the oldest revisions are deleted."` - Wait time.Duration `short:"w" default:"0s" help:"How long to wait for the package to install before returning. The command does not wait by default. Returns an error if the timeout is exceeded."` + RuntimeConfig string `help:"Install the package with a runtime configuration (for example a DeploymentRuntimeConfig)." placeholder:"NAME"` + ManualActivation bool `help:"Require the new package's first revision to be manually activated." short:"m"` + PackagePullSecrets []string `help:"A comma-separated list of secrets the package manager should use to pull the package from the registry." placeholder:"NAME"` + RevisionHistoryLimit int64 `help:"How many package revisions may exist before the oldest revisions are deleted." placeholder:"LIMIT" short:"r"` + Wait time.Duration `default:"0s" help:"How long to wait for the package to install before returning. The command does not wait by default. Returns an error if the timeout is exceeded." short:"w"` } func (c *installCmd) Help() string { @@ -84,7 +84,7 @@ Examples: } // Run the package install cmd. -func (c *installCmd) Run(k *kong.Context, logger logging.Logger) error { //nolint:gocyclo // TODO(negz): Can anything be broken out here? 
+func (c *installCmd) Run(k *kong.Context, logger logging.Logger) error { pkgName := c.Name if pkgName == "" { ref, err := name.ParseReference(c.Package, name.WithDefaultRegistry(xpkg.DefaultRegistry)) @@ -132,9 +132,9 @@ func (c *installCmd) Run(k *kong.Context, logger logging.Logger) error { //nolin Spec: v1.ConfigurationSpec{PackageSpec: spec}, } case "function": - pkg = &v1beta1.Function{ + pkg = &v1.Function{ ObjectMeta: metav1.ObjectMeta{Name: pkgName}, - Spec: v1beta1.FunctionSpec{PackageSpec: spec}, + Spec: v1.FunctionSpec{PackageSpec: spec}, } default: // The enum struct tag on the Kind field should make this impossible. @@ -200,7 +200,6 @@ func (c *installCmd) Run(k *kong.Context, logger logging.Logger) error { //nolin if err := ctx.Err(); errors.Is(err, context.DeadlineExceeded) { return errors.Wrap(err, "Package did not become ready") } - } _, err = fmt.Fprintf(k.Stdout, "%s/%s created\n", c.Kind, pkg.GetName()) diff --git a/cmd/crank/xpkg/login.go b/cmd/crank/xpkg/login.go index b0afe6d76..6acc2757b 100644 --- a/cmd/crank/xpkg/login.go +++ b/cmd/crank/xpkg/login.go @@ -49,9 +49,9 @@ const ( type loginCmd struct { // Flags. We're intentionally making an exception to the rule here and not // sorting these alphabetically. - Username string `short:"u" env:"UP_USER" xor:"identifier" help:"Username used to authenticate."` - Password string `short:"p" env:"UP_PASSWORD" help:"Password for specified username. '-' to read from stdin."` - Token string `short:"t" env:"UP_TOKEN" xor:"identifier" help:"Token used to authenticate. '-' to read from stdin."` + Username string `env:"UP_USER" help:"Username used to authenticate." short:"u" xor:"identifier"` + Password string `env:"UP_PASSWORD" help:"Password for specified username. '-' to read from stdin." short:"p"` + Token string `env:"UP_TOKEN" help:"Token used to authenticate. '-' to read from stdin." short:"t" xor:"identifier"` // Common Upbound API configuration. 
upbound.Flags `embed:""` @@ -100,7 +100,7 @@ func (c *loginCmd) AfterApply(kongCtx *kong.Context) error { } // Run executes the login command. -func (c *loginCmd) Run(k *kong.Context, upCtx *upbound.Context) error { //nolint:gocyclo // TODO(phisco): refactor +func (c *loginCmd) Run(k *kong.Context, upCtx *upbound.Context) error { auth, profType, err := constructAuth(c.Username, c.Token, c.Password) if err != nil { return errors.Wrap(err, "failed to construct auth") @@ -162,8 +162,8 @@ func (c *loginCmd) Run(k *kong.Context, upCtx *upbound.Context) error { //nolint if err := upCtx.CfgSrc.UpdateConfig(upCtx.Cfg); err != nil { return errors.Wrap(err, "failed to update config") } - fmt.Fprintln(k.Stdout, "Login successful.") - return nil + _, err = fmt.Fprintln(k.Stdout, "Login successful.") + return err } func (c *loginCmd) setupCredentials() error { @@ -204,21 +204,25 @@ func getPassword(f *os.File) (string, error) { if !term.IsTerminal(int(f.Fd())) { return "", errors.New("not a terminal") } - fmt.Fprintf(f, "Password: ") + if _, err := fmt.Fprintf(f, "Password: "); err != nil { + return "", err + } password, err := term.ReadPassword(int(f.Fd())) if err != nil { return "", err } // Print a new line because ReadPassword does not. - _, _ = fmt.Fprintf(f, "\n") - return string(password), nil - + _, err = fmt.Fprintf(f, "\n") + return string(password), err } + func getUsername(f *os.File) (string, error) { if !term.IsTerminal(int(f.Fd())) { return "", errors.New("not a terminal") } - fmt.Fprintf(f, "Username: ") + if _, err := fmt.Fprintf(f, "Username: "); err != nil { + return "", err + } reader := bufio.NewReader(f) s, err := reader.ReadString('\n') if err != nil { diff --git a/cmd/crank/xpkg/push.go b/cmd/crank/xpkg/push.go index eca0aee0d..1aceab488 100644 --- a/cmd/crank/xpkg/push.go +++ b/cmd/crank/xpkg/push.go @@ -61,7 +61,7 @@ type pushCmd struct { Package string `arg:"" help:"Where to push the package."` // Flags. Keep sorted alphabetically. 
- PackageFiles []string `short:"f" type:"existingfile" placeholder:"PATH" help:"A comma-separated list of xpkg files to push."` + PackageFiles []string `help:"A comma-separated list of xpkg files to push." placeholder:"PATH" short:"f" type:"existingfile"` // Common Upbound API configuration. upbound.Flags `embed:""` @@ -94,7 +94,7 @@ func (c *pushCmd) AfterApply() error { } // Run runs the push cmd. -func (c *pushCmd) Run(logger logging.Logger) error { //nolint:gocyclo // This feels easier to read as-is. +func (c *pushCmd) Run(logger logging.Logger) error { //nolint:gocognit // This feels easier to read as-is. upCtx, err := upbound.NewFromFlags(c.Flags, upbound.AllowMissingProfile()) if err != nil { return err @@ -152,7 +152,6 @@ func (c *pushCmd) Run(logger logging.Logger) error { //nolint:gocyclo // This fe adds := make([]mutate.IndexAddendum, len(c.PackageFiles)) g, ctx := errgroup.WithContext(context.Background()) for i, file := range c.PackageFiles { - i, file := i, file // Pin range variables for use in goroutine g.Go(func() error { img, err := tarball.ImageFromPath(filepath.Clean(file), nil) if err != nil { diff --git a/cmd/crank/beta/xpkg/testdata/NOTES.txt b/cmd/crank/xpkg/testdata/NOTES.txt similarity index 100% rename from cmd/crank/beta/xpkg/testdata/NOTES.txt rename to cmd/crank/xpkg/testdata/NOTES.txt diff --git a/cmd/crank/xpkg/update.go b/cmd/crank/xpkg/update.go index 97f532246..70c7c57b4 100644 --- a/cmd/crank/xpkg/update.go +++ b/cmd/crank/xpkg/update.go @@ -42,9 +42,9 @@ import ( // updateCmd updates a package. type updateCmd struct { // Arguments. - Kind string `arg:"" help:"The kind of package to update. One of \"provider\", \"configuration\", or \"function\"." enum:"provider,configuration,function"` + Kind string `arg:"" enum:"provider,configuration,function" help:"The kind of package to update. 
One of \"provider\", \"configuration\", or \"function\"."` Package string `arg:"" help:"The package to update to."` - Name string `arg:"" optional:"" help:"The name of the package to update in the Crossplane API. Derived from the package repository and tag by default."` + Name string `arg:"" help:"The name of the package to update in the Crossplane API. Derived from the package repository and tag by default." optional:""` } func (c *updateCmd) Help() string { @@ -85,7 +85,7 @@ func (c *updateCmd) Run(k *kong.Context, logger logging.Logger) error { case "configuration": pkg = &v1.Configuration{} case "function": - pkg = &v1beta1.Function{} + pkg = &v1.Function{} default: // The enum struct tag on the Kind field should make this impossible. return errors.Errorf("unsupported package kind %q", c.Kind) diff --git a/cmd/crank/xpkg/xpkg.go b/cmd/crank/xpkg/xpkg.go index 82c9058af..f996600ee 100644 --- a/cmd/crank/xpkg/xpkg.go +++ b/cmd/crank/xpkg/xpkg.go @@ -23,6 +23,7 @@ package xpkg type Cmd struct { // Keep subcommands sorted alphabetically. 
Build buildCmd `cmd:"" help:"Build a new package."` + Init initCmd `cmd:"" help:"Initialize a new package from a template."` Install installCmd `cmd:"" help:"Install a package in a control plane."` Login loginCmd `cmd:"" help:"Login to the default package registry."` Logout logoutCmd `cmd:"" help:"Logout of the default package registry."` diff --git a/cmd/crossplane/core/core.go b/cmd/crossplane/core/core.go index d1e09a42b..624f64199 100644 --- a/cmd/crossplane/core/core.go +++ b/cmd/crossplane/core/core.go @@ -21,6 +21,7 @@ import ( "context" "crypto/tls" "fmt" + "io" "os" "path/filepath" "time" @@ -30,6 +31,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/rest" + kcache "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/leaderelection/resourcelock" "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" @@ -44,11 +46,13 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/feature" "github.com/crossplane/crossplane-runtime/pkg/logging" "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured" "github.com/crossplane/crossplane/internal/controller/apiextensions" apiextensionscontroller "github.com/crossplane/crossplane/internal/controller/apiextensions/controller" "github.com/crossplane/crossplane/internal/controller/pkg" pkgcontroller "github.com/crossplane/crossplane/internal/controller/pkg/controller" + "github.com/crossplane/crossplane/internal/engine" "github.com/crossplane/crossplane/internal/features" "github.com/crossplane/crossplane/internal/initializer" "github.com/crossplane/crossplane/internal/metrics" @@ -60,7 +64,7 @@ import ( "github.com/crossplane/crossplane/internal/xpkg" ) -// Command runs the core crossplane controllers +// Command runs the core crossplane controllers. 
type Command struct { Start startCommand `cmd:"" help:"Start Crossplane controllers."` Init initCommand `cmd:"" help:"Make cluster ready for Crossplane controllers."` @@ -68,9 +72,9 @@ type Command struct { // KongVars represent the kong variables associated with the CLI parser // required for the Registry default variable interpolation. -var KongVars = kong.Vars{ +var KongVars = kong.Vars{ //nolint:gochecknoglobals // We treat these as constants. "default_registry": xpkg.DefaultRegistry, - "default_user_agent": transport.DefaultUserAgent, + "default_user_agent": transport.DefaultUserAgent(), } // Run is the no-op method required for kong call tree @@ -81,28 +85,28 @@ func (c *Command) Run() error { } type startCommand struct { - Profile string `placeholder:"host:port" help:"Serve runtime profiling data via HTTP at /debug/pprof."` + Profile string `help:"Serve runtime profiling data via HTTP at /debug/pprof." placeholder:"host:port"` - Namespace string `short:"n" help:"Namespace used to unpack and run packages." default:"crossplane-system" env:"POD_NAMESPACE"` - ServiceAccount string `help:"Name of the Crossplane Service Account." default:"crossplane" env:"POD_SERVICE_ACCOUNT"` - CacheDir string `short:"c" help:"Directory used for caching package images." default:"/cache" env:"CACHE_DIR"` - LeaderElection bool `short:"l" help:"Use leader election for the controller manager." default:"false" env:"LEADER_ELECTION"` - Registry string `short:"r" help:"Default registry used to fetch packages when not specified in tag." default:"${default_registry}" env:"REGISTRY"` - CABundlePath string `help:"Additional CA bundle to use when fetching packages from registry." env:"CA_BUNDLE_PATH"` - UserAgent string `help:"The User-Agent header that will be set on all package requests." default:"${default_user_agent}" env:"USER_AGENT"` + Namespace string `default:"crossplane-system" env:"POD_NAMESPACE" help:"Namespace used to unpack and run packages." 
short:"n"` + ServiceAccount string `default:"crossplane" env:"POD_SERVICE_ACCOUNT" help:"Name of the Crossplane Service Account."` + CacheDir string `default:"/cache" env:"CACHE_DIR" help:"Directory used for caching package images." short:"c"` + LeaderElection bool `default:"false" env:"LEADER_ELECTION" help:"Use leader election for the controller manager." short:"l"` + Registry string `default:"${default_registry}" env:"REGISTRY" help:"Default registry used to fetch packages when not specified in tag." short:"r"` + CABundlePath string `env:"CA_BUNDLE_PATH" help:"Additional CA bundle to use when fetching packages from registry."` + UserAgent string `default:"${default_user_agent}" env:"USER_AGENT" help:"The User-Agent header that will be set on all package requests."` - PackageRuntime string `helm:"The package runtime to use for packages with a runtime (e.g. Providers and Functions)" default:"Deployment" env:"PACKAGE_RUNTIME"` + PackageRuntime string `default:"Deployment" env:"PACKAGE_RUNTIME" help:"The package runtime to use for packages with a runtime (e.g. Providers and Functions)"` - SyncInterval time.Duration `short:"s" help:"How often all resources will be double-checked for drift from the desired state." default:"1h"` - PollInterval time.Duration `help:"How often individual resources will be checked for drift from the desired state." default:"1m"` - MaxReconcileRate int `help:"The global maximum rate per second at which resources may checked for drift from the desired state." default:"10"` + SyncInterval time.Duration `default:"1h" help:"How often all resources will be double-checked for drift from the desired state." 
short:"s"` + PollInterval time.Duration `default:"1m" help:"How often individual resources will be checked for drift from the desired state."` + MaxReconcileRate int `default:"100" help:"The global maximum rate per second at which resources may checked for drift from the desired state."` - WebhookEnabled bool `help:"Enable webhook configuration." default:"true" env:"WEBHOOK_ENABLED"` + WebhookEnabled bool `default:"true" env:"WEBHOOK_ENABLED" help:"Enable webhook configuration."` - TLSServerSecretName string `help:"The name of the TLS Secret that will store Crossplane's server certificate." env:"TLS_SERVER_SECRET_NAME"` - TLSServerCertsDir string `help:"The path of the folder which will store TLS server certificate of Crossplane." env:"TLS_SERVER_CERTS_DIR"` - TLSClientSecretName string `help:"The name of the TLS Secret that will be store Crossplane's client certificate." env:"TLS_CLIENT_SECRET_NAME"` - TLSClientCertsDir string `help:"The path of the folder which will store TLS client certificate of Crossplane." env:"TLS_CLIENT_CERTS_DIR"` + TLSServerSecretName string `env:"TLS_SERVER_SECRET_NAME" help:"The name of the TLS Secret that will store Crossplane's server certificate."` + TLSServerCertsDir string `env:"TLS_SERVER_CERTS_DIR" help:"The path of the folder which will store TLS server certificate of Crossplane."` + TLSClientSecretName string `env:"TLS_CLIENT_SECRET_NAME" help:"The name of the TLS Secret that will be store Crossplane's client certificate."` + TLSClientCertsDir string `env:"TLS_CLIENT_CERTS_DIR" help:"The path of the folder which will store TLS client certificate of Crossplane."` EnableEnvironmentConfigs bool `group:"Alpha Features:" help:"Enable support for EnvironmentConfigs."` EnableExternalSecretStores bool `group:"Alpha Features:" help:"Enable support for External Secret Stores."` @@ -117,20 +121,23 @@ type startCommand struct { // a default DeploymentRuntimeConfig. 
EnableProviderIdentity bool `group:"Alpha Features:" help:"Enable support for Provider identity."` - EnableCompositionFunctions bool `group:"Beta Features:" default:"true" help:"Enable support for Composition Functions."` - EnableCompositionFunctionsExtraResources bool `group:"Beta Features:" default:"true" help:"Enable support for Composition Functions Extra Resources. Only respected if --enable-composition-functions is set to true."` - EnableCompositionWebhookSchemaValidation bool `group:"Beta Features:" default:"true" help:"Enable support for Composition validation using schemas."` - EnableDeploymentRuntimeConfigs bool `group:"Beta Features:" default:"true" help:"Enable support for Deployment Runtime Configs."` + EnableCompositionWebhookSchemaValidation bool `default:"true" group:"Beta Features:" help:"Enable support for Composition validation using schemas."` + EnableDeploymentRuntimeConfigs bool `default:"true" group:"Beta Features:" help:"Enable support for Deployment Runtime Configs."` // These are GA features that previously had alpha or beta feature flags. // You can't turn off a GA feature. We maintain the flags to avoid breaking // folks who are passing them, but they do nothing. The flags are hidden so // they don't show up in the help output. - EnableCompositionRevisions bool `default:"true" hidden:""` + EnableCompositionRevisions bool `default:"true" hidden:""` + EnableCompositionFunctions bool `default:"true" hidden:""` + EnableCompositionFunctionsExtraResources bool `default:"true" hidden:""` } // Run core Crossplane controllers. -func (c *startCommand) Run(s *runtime.Scheme, log logging.Logger) error { //nolint:gocyclo // Only slightly over. +func (c *startCommand) Run(s *runtime.Scheme, log logging.Logger) error { //nolint:gocognit // Only slightly over. 
+ ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cfg, err := ctrl.GetConfig() if err != nil { return errors.Wrap(err, "cannot get config") @@ -141,6 +148,8 @@ func (c *startCommand) Run(s *runtime.Scheme, log logging.Logger) error { //noli Deduplicate: true, }) + // The claim and XR controllers don't use the manager's cache or client. + // They use their own. They're setup later in this method. eb := record.NewBroadcaster() mgr, err := ctrl.NewManager(ratelimiter.LimitRESTConfig(cfg, c.MaxReconcileRate), ctrl.Options{ Scheme: s, @@ -203,43 +212,37 @@ func (c *startCommand) Run(s *runtime.Scheme, log logging.Logger) error { //noli } if !c.EnableCompositionRevisions { - log.Info("CompositionRevisions feature is GA and cannot be disabled. The --enable-composition-revisions flag will be removed in a future release.") + log.Info("Composition Revisions are GA and cannot be disabled. The --enable-composition-revisions flag will be removed in a future release.") + } + if !c.EnableCompositionFunctions { + log.Info("Composition Functions are GA and cannot be disabled. The --enable-composition-functions flag will be removed in a future release.") + } + if !c.EnableCompositionFunctionsExtraResources { + log.Info("Extra Resources are GA and cannot be disabled. 
The --enable-composition-functions-extra-resources flag will be removed in a future release.") } - var functionRunner *xfn.PackagedFunctionRunner - if c.EnableCompositionFunctions { - o.Features.Enable(features.EnableBetaCompositionFunctions) - log.Info("Beta feature enabled", "flag", features.EnableBetaCompositionFunctions) + clienttls, err := certificates.LoadMTLSConfig( + filepath.Join(c.TLSClientCertsDir, initializer.SecretKeyCACert), + filepath.Join(c.TLSClientCertsDir, corev1.TLSCertKey), + filepath.Join(c.TLSClientCertsDir, corev1.TLSPrivateKeyKey), + false) + if err != nil { + return errors.Wrap(err, "cannot load client TLS certificates") + } - if c.EnableCompositionFunctionsExtraResources { - o.Features.Enable(features.EnableBetaCompositionFunctionsExtraResources) - log.Info("Beta feature enabled", "flag", features.EnableBetaCompositionFunctionsExtraResources) - } + m := xfn.NewMetrics() + metrics.Registry.MustRegister(m) - clienttls, err := certificates.LoadMTLSConfig( - filepath.Join(c.TLSClientCertsDir, initializer.SecretKeyCACert), - filepath.Join(c.TLSClientCertsDir, corev1.TLSCertKey), - filepath.Join(c.TLSClientCertsDir, corev1.TLSPrivateKeyKey), - false) - if err != nil { - return errors.Wrap(err, "cannot load client TLS certificates") - } - - m := xfn.NewMetrics() - metrics.Registry.MustRegister(m) + // We want all XR controllers to share the same gRPC clients. + functionRunner := xfn.NewPackagedFunctionRunner(mgr.GetClient(), + xfn.WithLogger(log), + xfn.WithTLSConfig(clienttls), + xfn.WithInterceptorCreators(m), + ) - // We want all XR controllers to share the same gRPC clients. - functionRunner = xfn.NewPackagedFunctionRunner(mgr.GetClient(), - xfn.WithLogger(log), - xfn.WithTLSConfig(clienttls), - xfn.WithInterceptorCreators(m), - ) + // Periodically remove clients for Functions that no longer exist. + go functionRunner.GarbageCollectConnections(ctx, 10*time.Minute) - // Periodically remove clients for Functions that no longer exist. 
- ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - go functionRunner.GarbageCollectConnections(ctx, 10*time.Minute) - } if c.EnableEnvironmentConfigs { o.Features.Enable(features.EnableAlphaEnvironmentConfigs) log.Info("Alpha feature enabled", "flag", features.EnableAlphaEnvironmentConfigs) @@ -282,9 +285,89 @@ func (c *startCommand) Run(s *runtime.Scheme, log logging.Logger) error { //noli log.Info("Alpha feature enabled", "flag", features.EnableAlphaClaimSSA) } + // Claim and XR controllers are started and stopped dynamically by the + // ControllerEngine below. When realtime compositions are enabled, they also + // start and stop their watches (e.g. of composed resources) dynamically. To + // do this, the ControllerEngine must have exclusive ownership of a cache. + // This allows it to track what controllers are using the cache's informers. + ca, err := cache.New(mgr.GetConfig(), cache.Options{ + HTTPClient: mgr.GetHTTPClient(), + Scheme: mgr.GetScheme(), + Mapper: mgr.GetRESTMapper(), + SyncPeriod: &c.SyncInterval, + + // When a CRD is deleted, any informers for its GVKs will start trying + // to restart their watches, and fail with scary errors. This should + // only happen when realtime composition is enabled, and we should GC + // the informer within 60 seconds. This handler tries to make the error + // a little more informative, and less scary. + DefaultWatchErrorHandler: func(_ *kcache.Reflector, err error) { + if errors.Is(io.EOF, err) { + // Watch closed normally. + return + } + log.Debug("Watch error - probably due to CRD being uninstalled", "error", err) + }, + }) + if err != nil { + return errors.Wrap(err, "cannot create cache for API extension controllers") + } + + go func() { + // Don't start the cache until the manager is elected. 
+ <-mgr.Elected() + + if err := ca.Start(ctx); err != nil { + log.Info("API extensions cache returned an error", "error", err) + } + + log.Info("API extensions cache stopped") + }() + + cl, err := client.New(mgr.GetConfig(), client.Options{ + HTTPClient: mgr.GetHTTPClient(), + Scheme: mgr.GetScheme(), + Mapper: mgr.GetRESTMapper(), + Cache: &client.CacheOptions{ + Reader: ca, + + // Don't cache secrets - there may be a lot of them. + DisableFor: []client.Object{&corev1.Secret{}}, + + // Cache unstructured resources (like XRs and MRs) on Get and List. + Unstructured: true, + }, + }) + if err != nil { + return errors.Wrap(err, "cannot create client for API extension controllers") + } + + // It's important the engine's client is wrapped with unstructured.NewClient + // because controller-runtime always caches *unstructured.Unstructured, not + // our wrapper types like *composite.Unstructured. This client takes care of + // automatically wrapping and unwrapping *unstructured.Unstructured. + ce := engine.New(mgr, + engine.TrackInformers(ca, mgr.GetScheme()), + unstructured.NewClient(cl), + engine.WithLogger(log), + ) + + // TODO(negz): Garbage collect informers for CRs that are still defined + // (i.e. still have CRDs) but aren't used? Currently if an XR starts + // composing a kind of CR then stops, we won't stop the unused informer + // until the CRD that defines the CR is deleted. That could never happen. + // Consider for example composing two types of MR from the same provider, + // then updating to compose only one. + + // Garbage collect informers for custom resources when their CRD is deleted. 
+ if err := ce.GarbageCollectCustomResourceInformers(ctx); err != nil { + return errors.Wrap(err, "cannot start garbage collector for custom resource informers") + } + ao := apiextensionscontroller.Options{ - Options: o, - FunctionRunner: functionRunner, + Options: o, + ControllerEngine: ce, + FunctionRunner: functionRunner, } if err := apiextensions.Setup(mgr, ao); err != nil { diff --git a/cmd/crossplane/core/init.go b/cmd/crossplane/core/init.go index fbe6ba0aa..52a784bc3 100644 --- a/cmd/crossplane/core/init.go +++ b/cmd/crossplane/core/init.go @@ -34,19 +34,20 @@ import ( // initCommand configuration for the initialization of core Crossplane controllers. type initCommand struct { - Providers []string `name:"provider" help:"Pre-install a Provider by giving its image URI. This argument can be repeated."` - Configurations []string `name:"configuration" help:"Pre-install a Configuration by giving its image URI. This argument can be repeated."` - Namespace string `short:"n" help:"Namespace used to set as default scope in default secret store config." default:"crossplane-system" env:"POD_NAMESPACE"` - ServiceAccount string `help:"Name of the Crossplane Service Account." default:"crossplane" env:"POD_SERVICE_ACCOUNT"` + Providers []string `help:"Pre-install a Provider by giving its image URI. This argument can be repeated." name:"provider"` + Configurations []string `help:"Pre-install a Configuration by giving its image URI. This argument can be repeated." name:"configuration"` + Functions []string `help:"Pre-install a Function by giving its image URI. This argument can be repeated." name:"function"` + Namespace string `default:"crossplane-system" env:"POD_NAMESPACE" help:"Namespace used to set as default scope in default secret store config." short:"n"` + ServiceAccount string `default:"crossplane" env:"POD_SERVICE_ACCOUNT" help:"Name of the Crossplane Service Account."` - WebhookEnabled bool `help:"Enable webhook configuration." 
default:"true" env:"WEBHOOK_ENABLED"` - WebhookServiceName string `help:"The name of the Service object that the webhook service will be run." env:"WEBHOOK_SERVICE_NAME"` - WebhookServiceNamespace string `help:"The namespace of the Service object that the webhook service will be run." env:"WEBHOOK_SERVICE_NAMESPACE"` - WebhookServicePort int32 `help:"The port of the Service that the webhook service will be run." env:"WEBHOOK_SERVICE_PORT"` - ESSTLSServerSecretName string `help:"The name of the Secret that the initializer will fill with ESS TLS server certificate." env:"ESS_TLS_SERVER_SECRET_NAME"` - TLSCASecretName string `help:"The name of the Secret that the initializer will fill with TLS CA certificate." env:"TLS_CA_SECRET_NAME"` - TLSServerSecretName string `help:"The name of the Secret that the initializer will fill with TLS server certificates." env:"TLS_SERVER_SECRET_NAME"` - TLSClientSecretName string `help:"The name of the Secret that the initializer will fill with TLS client certificates." 
env:"TLS_CLIENT_SECRET_NAME"` + WebhookEnabled bool `default:"true" env:"WEBHOOK_ENABLED" help:"Enable webhook configuration."` + WebhookServiceName string `env:"WEBHOOK_SERVICE_NAME" help:"The name of the Service object that the webhook service will be run."` + WebhookServiceNamespace string `env:"WEBHOOK_SERVICE_NAMESPACE" help:"The namespace of the Service object that the webhook service will be run."` + WebhookServicePort int32 `env:"WEBHOOK_SERVICE_PORT" help:"The port of the Service that the webhook service will be run."` + ESSTLSServerSecretName string `env:"ESS_TLS_SERVER_SECRET_NAME" help:"The name of the Secret that the initializer will fill with ESS TLS server certificate."` + TLSCASecretName string `env:"TLS_CA_SECRET_NAME" help:"The name of the Secret that the initializer will fill with TLS CA certificate."` + TLSServerSecretName string `env:"TLS_SERVER_SECRET_NAME" help:"The name of the Secret that the initializer will fill with TLS server certificates."` + TLSClientSecretName string `env:"TLS_CLIENT_SECRET_NAME" help:"The name of the Secret that the initializer will fill with TLS client certificates."` } // Run starts the initialization process. 
@@ -73,6 +74,8 @@ func (c *initCommand) Run(s *runtime.Scheme, log logging.Logger) error { initializer.NewTLSCertificateGenerator(c.Namespace, c.TLSCASecretName, tlsGeneratorOpts...), initializer.NewCoreCRDsMigrator("compositionrevisions.apiextensions.crossplane.io", "v1alpha1"), initializer.NewCoreCRDsMigrator("locks.pkg.crossplane.io", "v1alpha1"), + initializer.NewCoreCRDsMigrator("functions.pkg.crossplane.io", "v1beta1"), + initializer.NewCoreCRDsMigrator("functionrevisions.pkg.crossplane.io", "v1beta1"), ) if c.WebhookEnabled { nn := types.NamespacedName{ @@ -101,7 +104,7 @@ func (c *initCommand) Run(s *runtime.Scheme, log logging.Logger) error { } steps = append(steps, initializer.NewLockObject(), - initializer.NewPackageInstaller(c.Providers, c.Configurations), + initializer.NewPackageInstaller(c.Providers, c.Configurations, c.Functions), initializer.NewStoreConfigObject(c.Namespace), initializer.StepFunc(initializer.DefaultDeploymentRuntimeConfig), ) diff --git a/cmd/crossplane/main.go b/cmd/crossplane/main.go index 6eab7c47f..905df688f 100644 --- a/cmd/crossplane/main.go +++ b/cmd/crossplane/main.go @@ -41,15 +41,17 @@ import ( "github.com/crossplane/crossplane/internal/version" ) -type debugFlag bool -type versionFlag bool +type ( + debugFlag bool + versionFlag bool +) -var cli struct { - Debug debugFlag `short:"d" help:"Print verbose logging statements."` +type cli struct { + Debug debugFlag `help:"Print verbose logging statements." short:"d"` - Version versionFlag `short:"v" help:"Print version and quit."` + Version versionFlag `help:"Print version and quit." short:"v"` - Core core.Command `cmd:"" help:"Start core Crossplane controllers." 
default:"withargs"` + Core core.Command `cmd:"" default:"withargs" help:"Start core Crossplane controllers."` Rbac rbac.Command `cmd:"" help:"Start Crossplane RBAC Manager controllers."` } @@ -68,22 +70,26 @@ func (d debugFlag) BeforeApply(ctx *kong.Context) error { //nolint:unparam // Be // *very* verbose even at info level, so we only provide it a real // logger when we're running in debug mode. ctrl.SetLogger(zl) + logging.SetFilteredKlogLogger(zl) return nil } func (v versionFlag) BeforeApply(app *kong.Kong) error { //nolint:unparam // BeforeApply requires this signature. - fmt.Fprintln(app.Stdout, version.New().GetVersionString()) + _, _ = fmt.Fprintln(app.Stdout, version.New().GetVersionString()) app.Exit(0) return nil } func main() { zl := zap.New().WithName("crossplane") + logging.SetFilteredKlogLogger(zl) + // Setting the controller-runtime logger to a no-op logger by default, // unless debug mode is enabled. This is because the controller-runtime // logger is *very* verbose even at info level. This is not really needed, // but otherwise we get a warning from the controller-runtime. ctrl.SetLogger(zap.New(zap.WriteTo(io.Discard))) + // Note that the controller managers scheme must be a superset of the // package manager's object scheme; it must contain all object types that // may appear in a Crossplane package. This is because the package manager @@ -91,7 +97,7 @@ func main() { // objects. s := runtime.NewScheme() - ctx := kong.Parse(&cli, + ctx := kong.Parse(&cli{}, kong.Name("crossplane"), kong.Description("An open source multicloud control plane."), kong.BindTo(logging.NewLogrLogger(zl), (*logging.Logger)(nil)), diff --git a/cmd/crossplane/rbac/rbac.go b/cmd/crossplane/rbac/rbac.go index 469d81a1c..ca8dcdb70 100644 --- a/cmd/crossplane/rbac/rbac.go +++ b/cmd/crossplane/rbac/rbac.go @@ -18,7 +18,6 @@ limitations under the License. 
package rbac import ( - "strings" "time" "github.com/alecthomas/kong" @@ -37,26 +36,13 @@ import ( "github.com/crossplane/crossplane/internal/xpkg" ) -// Available RBAC management policies. -const ( - ManagementPolicyAll = string(rbaccontroller.ManagementPolicyAll) - ManagementPolicyBasic = string(rbaccontroller.ManagementPolicyBasic) -) - // KongVars represent the kong variables associated with the CLI parser // required for the RBAC enum interpolation. -var KongVars = kong.Vars{ - "rbac_manage_default_var": ManagementPolicyBasic, - "rbac_manage_enum_var": strings.Join( - []string{ - ManagementPolicyAll, - ManagementPolicyBasic, - }, - ", "), +var KongVars = kong.Vars{ //nolint:gochecknoglobals // We treat these as constants. "rbac_default_registry": xpkg.DefaultRegistry, } -// Command runs the crossplane RBAC controllers +// Command runs the crossplane RBAC controllers. type Command struct { Start startCommand `cmd:"" help:"Start Crossplane RBAC controllers."` Init initCommand `cmd:"" help:"Initialize RBAC Manager."` @@ -70,28 +56,19 @@ func (c *Command) Run() error { } type startCommand struct { - Profile string `placeholder:"host:port" help:"Serve runtime profiling data via HTTP at /debug/pprof."` - - ProviderClusterRole string `name:"provider-clusterrole" help:"A ClusterRole enumerating the permissions provider packages may request."` - LeaderElection bool `name:"leader-election" short:"l" help:"Use leader election for the controller manager." env:"LEADER_ELECTION"` - Registry string `short:"r" help:"Default registry used to fetch packages when not specified in tag." default:"${rbac_default_registry}" env:"REGISTRY"` + Profile string `help:"Serve runtime profiling data via HTTP at /debug/pprof." 
placeholder:"host:port"` - ManagementPolicy string `name:"manage" short:"m" hidden:""` - DeprecatedManagementPolicy string `name:"deprecated-manage" hidden:"" default:"${rbac_manage_default_var}" enum:"${rbac_manage_enum_var}"` + ProviderClusterRole string `help:"A ClusterRole enumerating the permissions provider packages may request." name:"provider-clusterrole"` + LeaderElection bool `env:"LEADER_ELECTION" help:"Use leader election for the controller manager." name:"leader-election" short:"l"` + Registry string `default:"${rbac_default_registry}" env:"REGISTRY" help:"Default registry used to fetch packages when not specified in tag." short:"r"` - SyncInterval time.Duration `short:"s" help:"How often all resources will be double-checked for drift from the desired state." default:"1h"` - PollInterval time.Duration `help:"How often individual resources will be checked for drift from the desired state." default:"1m"` - MaxReconcileRate int `help:"The global maximum rate per second at which resources may checked for drift from the desired state." default:"10"` + SyncInterval time.Duration `default:"1h" help:"How often all resources will be double-checked for drift from the desired state." short:"s"` + PollInterval time.Duration `default:"1m" help:"How often individual resources will be checked for drift from the desired state."` + MaxReconcileRate int `default:"10" help:"The global maximum rate per second at which resources may checked for drift from the desired state."` } // Run the RBAC manager. 
func (c *startCommand) Run(s *runtime.Scheme, log logging.Logger) error { - if c.ManagementPolicy != "" { - return errors.New("--manage is deprecated, you can use --deprecated-manage until it is removed: see https://github.com/crossplane/crossplane/issues/5227") - } - - log.Debug("Starting", "policy", c.DeprecatedManagementPolicy) - cfg, err := ctrl.GetConfig() if err != nil { return errors.Wrap(err, "cannot get config") @@ -119,7 +96,6 @@ func (c *startCommand) Run(s *runtime.Scheme, log logging.Logger) error { GlobalRateLimiter: ratelimiter.NewGlobal(c.MaxReconcileRate), }, AllowClusterRole: c.ProviderClusterRole, - ManagementPolicy: rbaccontroller.ManagementPolicy(c.DeprecatedManagementPolicy), DefaultRegistry: c.Registry, } diff --git a/contributing/README.md b/contributing/README.md index bfe83a4bd..474afaace 100644 --- a/contributing/README.md +++ b/contributing/README.md @@ -10,13 +10,38 @@ us on [Slack]. Please also take a look at our [code of conduct], which details how contributors are expected to conduct themselves as part of the Crossplane community. +## Establishing a Development Environment + +> The Crossplane project consists of several repositories under the crossplane +> and crossplane-contrib GitHub organisations. We're experimenting with +> [Earthly] in this repository (crossplane) and crossplane-runtime. Most other +> repositories use a `Makefile`. To establish a development environment for a +> repository with a `Makefile`, try running `make && make help`. + +Crossplane is written in [Go]. You don't need to have Go installed to contribute +code to Crossplane but it helps to use an editor that understands Go. + +To setup a Crossplane development environment: + +1. Fork and clone this repository. +1. Install [Docker][get-docker] and [Earthly][get-earthly]. + +Use the `earthly` command to build and test Crossplane. Run `earthly doc` to see +available build targets. 
+ +Useful targets include: + +* `earthly +reviewable` - Run code generators, linters, and unit tests. +* `earthly -P +e2e` - Run end-to-end tests. +* `earthly +hack` - Build Crossplane and deploy it to a local `kind` cluster. + ## Checklist Cheat Sheet Wondering whether something on the pull request checklist applies to your PR? Generally: * Everyone must read and follow this contribution process. -* Every PR must run (and pass) `make reviewable`. +* Every PR must run (and pass) `earthly +reviewable`. * Most PRs that touch code should touch unit tests. We want ~80% coverage. * Any significant feature should be covered by E2E tests. If you're adding a new feature, you should probably be adding or updating E2Es. @@ -63,7 +88,7 @@ Ensure each of your commits is signed-off in compliance with the [Developer Certificate of Origin] by using `git commit -s`. The Crossplane project highly values readable, idiomatic Go code. Familiarise yourself with the [Coding Style](#coding-style) section below and try to preempt any comments your -reviewers would otherwise leave. Run `make reviewable` to lint your change. +reviewers would otherwise leave. Run `earthly +reviewable` to lint your change. All Crossplane features must be covered by unit **and** end-to-end (E2E) tests. @@ -473,6 +498,136 @@ func example() error { } ``` +Previously we made heavy use of error constants, for example: + +```go +const errFetch = "could not fetch the thing" + +if err != nil { + return errors.Wrap(err, errFetch) +} +``` + +__We no longer recommend this pattern__. Instead, you should mostly create or +wrap errors with "inline" error strings. Refer to [#4514] for context. + +### Test Error Properties, not Error Strings + +We recommend using `cmpopts.EquateErrors` to test that your code returns the +expected error. This `cmp` option will consider one error that `errors.Is` +another to be equal to it. 
+ +When testing a simple function with few error cases it's usually sufficient to +test simply whether or not an error was returned. You can use `cmpopts.AnyError` +for this. We prefer `cmpopts.AnyError` to a simple `err == nil` test because it +keeps our tests consistent. This way it's easy to mix and match tests that check +for `cmpopts.AnyError` with tests that check for a more specific error in the +same test table. + +For example: + +```go +func TestQuack(t *testing.T) { + type want struct { + output string + err error + } + + // We only care that Quack returns an error when supplied with a bad + // input, and returns no error when supplied with good input. + cases := map[string]struct{ + input string + want want + }{ + "BadInput": { + input: "Hello!", + want: want{ + err: cmpopts.AnyError, + }, + }, + "GoodInput": { + input: "Quack!", + want: want{ + output: "Quack!", + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + got, err := Quack(tc.input) + + if diff := cmp.Diff(got, tc.want.output); diff != "" { + t.Errorf("Quack(): -got, +want:\n%s", diff) + } + + if diff := cmp.Diff(err, tc.want.err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("Quack(): -got, +want:\n%s", diff) + } + }) + } +} +``` + +For more complex functions with many error cases (like `Reconciler` methods) +consider injecting dependencies that you can make return a specific sentinel +error. This way you're able to test that you got the error you'd expect given a +particular set of inputs and dependency behaviors, not another unexpected error. +For example: + +```go +func TestComplicatedQuacker(t *testing.T) { + // We'll inject this error and test we return an error that errors.Is + // (i.e. wraps) it. 
+ errBoom := errors.New("boom") + + type want struct { + output string + err error + } + + cases := map[string]struct{ + q Quacker + input string + want want + }{ + "BadQuackModulator": { + q: &ComplicatedQuacker{ + DuckIdentifer: func() (Duck, error) { + return &MockDuck{}, nil + }, + QuackModulator: func() (int, error) { + // QuackModulator returns our sentinel + // error. + return 0, errBoom + } + }, + input: "Hello!", + want: want{ + // We want an error that errors.Is (i.e. wraps) + // our sentinel error. We don't test what error + // message it was wrapped with. + err: errBoom, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + got, err := tc.q.Quack(tc.input) + + if diff := cmp.Diff(got, tc.want.output); diff != "" { + t.Errorf("q.Quack(): -got, +want:\n%s", diff) + } + + if diff := cmp.Diff(err, tc.want.err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("q.Quack(): -got, +want:\n%s", diff) + } + }) + } +} +``` + ### Scope Errors Where possible, keep errors as narrowly scoped as possible. This avoids bugs @@ -678,7 +833,7 @@ func TestExample(t *testing.T) { // even for simple comparisons to keep test output // consistent. Some Crossplane specific cmp options can // be found in crossplane-runtime/pkg/test. - if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { + if diff := cmp.Diff(tc.want.err, err, cmpopts.EquateErrors()); diff != "" { t.Errorf("%s\nExample(...): -want, +got:\n%s", tc.reason, diff) } @@ -690,68 +845,19 @@ func TestExample(t *testing.T) { } ``` -## Establishing a Development Environment - -The Crossplane project consists of several repositories under the crossplane and -crossplane-contrib GitHub organisations. Most of these projects use the Upbound -[build submodule]; a library of common Makefiles. Establishing a development -environment typically requires: - -1. Forking and cloning the repository you wish to work on. -1. Installing development dependencies. -1. 
Running `make` to establish the build submodule. - -Run `make help` for information on the available Make targets. Useful targets -include: - -* `make reviewable` - Run code generation, linters, and unit tests. -* `make e2e` - Run end-to-end tests. -* `make` - Build Crossplane. - -Once you've built Crossplane you can deploy it to a Kubernetes cluster of your -choice. [`kind`] (Kubernetes in Docker) is a good choice for development. The -`kind.sh` script contains several utilities to deploy and run a development -build of Crossplane to `kind`: - -```bash -# Build Crossplane locally. -make - -# See what commands are available. -./cluster/local/kind.sh help - -# Start a new kind cluster. Specifying KUBE_IMAGE is optional. -KUBE_IMAGE=kindest/node:v1.27.1 ./cluster/local/kind.sh up - -# Use Helm to deploy the local build of Crossplane. -./cluster/local/kind.sh helm-install - -# Use Helm to upgrade the local build of Crossplane. -./cluster/local/kind.sh helm-upgrade -``` - -When iterating rapidly on a change it can be faster to run Crossplane as a local -process, rather than as a pod deployed by Helm to your Kubernetes cluster. Use -Helm to install your local Crossplane build per the above instructions, then: - -```bash -# Stop the Helm-deployed Crossplane pod. -kubectl -n crossplane-system scale deploy crossplane --replicas=0 - -# Run Crossplane locally; it should connect to your kind cluster if said cluster -# is your active kubectl context. You can also go run cmd/crossplane/main.go. 
-make run -``` - [Slack]: https://slack.crossplane.io/ -[code of conduct]: https://github.com/cncf/foundation/blob/master/code-of-conduct.md -[build submodule]: https://github.com/upbound/build/ +[code of conduct]: https://github.com/cncf/foundation/blob/main/code-of-conduct.md +[Earthly]: https://docs.earthly.dev +[get-docker]: https://docs.docker.com/get-docker +[get-earthly]: https://earthly.dev/get-earthly +[Go]: https://go.dev +[build submodule]: https://github.com/crossplane/build/ [`kind`]: https://kind.sigs.k8s.io/ [Crossplane release cycle]: https://docs.crossplane.io/knowledge-base/guides/release-cycle -[good git commit hygiene]: https://www.futurelearn.com/info/blog/telling-stories-with-your-git-history +[good git commit hygiene]: https://www.futurelearn.com/info/blog/telling-stories-with-your-git-history?category=using-futurelearn [Developer Certificate of Origin]: https://github.com/apps/dco -[code review comments]: https://github.com/golang/go/wiki/CodeReviewComments -[test review comments]: https://github.com/golang/go/wiki/TestComments +[code review comments]: https://go.dev/wiki/CodeReviewComments +[test review comments]: https://go.dev/wiki/TestComments [E2E readme]: ../test/e2e/README.md [docs]: https://github.com/crossplane/docs [Effective Go]: https://golang.org/doc/effective_go @@ -765,3 +871,4 @@ make run [CODEOWNERS]: ../CODEOWNERS [Reviewers]: ../OWNERS.md#reviewers [Maintainers]: ../OWNERS.md#maintainers +[#4514]: https://github.com/crossplane/crossplane/issues/4514 diff --git a/contributing/guide-adding-external-secret-stores.md b/contributing/guide-adding-external-secret-stores.md index 8f4093c40..0b14d2241 100644 --- a/contributing/guide-adding-external-secret-stores.md +++ b/contributing/guide-adding-external-secret-stores.md @@ -124,7 +124,7 @@ requires some dirty work as we need to this for all types: You can check [this commit as an example for changes in Setup functions] as an example. 
-[External Secret Stores]: https://github.com/crossplane/crossplane/blob/master/design/design-doc-external-secret-stores.md +[External Secret Stores]: https://github.com/crossplane/crossplane/blob/main/design/design-doc-external-secret-stores.md [this PR as a complete example]: https://github.com/crossplane/provider-gcp/pull/421 [this PR instead]: https://github.com/crossplane-contrib/provider-jet-template/pull/23/commits [this commit as an example on how to add the type]: https://github.com/crossplane-contrib/provider-aws/pull/1242/commits/d8a2df323fa2489d82bf1843d2fe338de033c61d diff --git a/contributing/guide-adding-fuzz-test-cases.md b/contributing/guide-adding-fuzz-test-cases.md index e6d4a0a02..0775f5dbc 100644 --- a/contributing/guide-adding-fuzz-test-cases.md +++ b/contributing/guide-adding-fuzz-test-cases.md @@ -102,12 +102,12 @@ put in place a few configurations across different repositories: [CIFuzz]: https://google.github.io/oss-fuzz/getting-started/continuous-integration/ -[fuzz-audit-report]: https://github.com/crossplane/crossplane/blob/master/security/ADA-fuzzing-audit-22.pdf +[fuzz-audit-report]: https://github.com/crossplane/crossplane/blob/main/security/ADA-fuzzing-audit-22.pdf [go-fuzz]: https://go.dev/security/fuzz/ [oss-fuzz-arch]: https://google.github.io/oss-fuzz/architecture/ [oss-fuzz-folder]: https://github.com/google/oss-fuzz/tree/master/projects/crossplane [oss-fuzz]: https://github.com/google/oss-fuzz -[xp-ci]: https://github.com/crossplane/crossplane/blob/master/.github/workflows/ci.yml +[xp-ci]: https://github.com/crossplane/crossplane/blob/main/.github/workflows/ci.yml [xp-fuzz-tests]: https://github.com/search?q=repo%3Acrossplane%2Fcrossplane+%22func+Fuzz%22&type=code -[xp-fuzz_oss_build]: https://github.com/crossplane/crossplane/blob/master/test/fuzz/oss_fuzz_build.sh -[xp-r-fuzz_oss_build]: https://github.com/crossplane/crossplane-runtime/blob/master/test/fuzz/oss_fuzz_build.sh +[xp-fuzz_oss_build]: 
https://github.com/crossplane/crossplane/blob/main/test/fuzz/oss_fuzz_build.sh +[xp-r-fuzz_oss_build]: https://github.com/crossplane/crossplane-runtime/blob/main/test/fuzz/oss_fuzz_build.sh diff --git a/contributing/guide-provider-development.md b/contributing/guide-provider-development.md index 469e13f62..2a4b6c582 100644 --- a/contributing/guide-provider-development.md +++ b/contributing/guide-provider-development.md @@ -644,7 +644,7 @@ feedback you may have about the development process! [reach out]: https://github.com/crossplane/crossplane#get-involved [crossplane org]: https://github.com/crossplane [`angryjet`]: https://github.com/crossplane/crossplane-tools -[Managed Resource API Patterns]: https://github.com/crossplane/crossplane/blob/master/design/one-pager-managed-resource-api-design.md +[Managed Resource API Patterns]: https://github.com/crossplane/crossplane/blob/main/design/one-pager-managed-resource-api-design.md [Crossplane CLI]: https://github.com/crossplane/crossplane-cli#quick-start-stacks [`angryjet` documentation]: https://github.com/crossplane/crossplane-tools/blob/master/README.md [code generation guide]: https://github.com/crossplane-contrib/provider-aws/blob/master/CODE_GENERATION.md diff --git a/contributing/specifications/functions.md b/contributing/specifications/functions.md index b43b954cc..50bf81e8f 100644 --- a/contributing/specifications/functions.md +++ b/contributing/specifications/functions.md @@ -15,7 +15,7 @@ A Function MUST implement a gRPC `FunctionRunnerService` server. A Function SHOULD implement the latest available version of this service - e.g. `v1beta1`. The authoritative definition of this service can be found at the following URL. 
-https://github.com/crossplane/crossplane/tree/master/apis/apiextensions/fn/proto +https://github.com/crossplane/crossplane/tree/main/apis/apiextensions/fn/proto A Function MUST copy the tag field from a RunFunctionRequest's RequestMeta message to the ResponseMeta tag field of the corresponding RunFunctionResponse. diff --git a/design/README.md b/design/README.md index 9cd3da7dc..b0737e656 100644 --- a/design/README.md +++ b/design/README.md @@ -39,7 +39,7 @@ welcome from any member of the Crossplane community, but feedback from the elected reviewers carries extra weight. The __document status__ reflects the lifecycle of the design. Designs may be -committed to master at any stage in their lifecycle as long as the status is +committed to `main` at any stage in their lifecycle as long as the status is indicated clearly. Use one of the following statuses: * _Speculative_ designs explore an idea without _yet_ explicitly proposing a diff --git a/design/defunct/one-pager-consuming-k8s-clusters.md b/design/defunct/one-pager-consuming-k8s-clusters.md index 6c32ccb02..359e5d0c4 100644 --- a/design/defunct/one-pager-consuming-k8s-clusters.md +++ b/design/defunct/one-pager-consuming-k8s-clusters.md @@ -304,7 +304,7 @@ Because the scheduling of `KubernetesApplication` resources is now isolated to target the `KubernetesTarget` resource, more intelligent scheduling can be enabled without touching other parts of the Crossplane ecosystem. Previously, a `KubernetesCluster` claim was used for claiming, consuming, and dynamically -provisioning Kubernetes cluster resources so changes to the the API type related +provisioning Kubernetes cluster resources so changes to the API type related to scheduling (i.e. consuming) could unintentionally affect those other capabilities as well. 
Potential future scheduling improvements could involve price, latency, and geographic optimization by surfacing additional fields or diff --git a/design/defunct/one-pager-default-resource-class.md b/design/defunct/one-pager-default-resource-class.md index 941197a9c..d804cb281 100644 --- a/design/defunct/one-pager-default-resource-class.md +++ b/design/defunct/one-pager-default-resource-class.md @@ -36,7 +36,7 @@ that can be consumed in a portable manner across cloud providers. ### Original State -Originally, resource claims had to explicitly declare the the underlying +Originally, resource claims had to explicitly declare the underlying resource class that they want to inherit the configuration from on deployment. For example, the following resource class could be declared for a Postgres RDS database instance on AWS: diff --git a/design/defunct/one-pager-strongly-typed-class.md b/design/defunct/one-pager-strongly-typed-class.md index 334590907..d2971a000 100644 --- a/design/defunct/one-pager-strongly-typed-class.md +++ b/design/defunct/one-pager-strongly-typed-class.md @@ -86,7 +86,7 @@ spec: engineVersion: "9.6" ``` -This model is powerful because it allows an application developer to create a resource claim without having to know the implementation details or even the underlying provider. However, the fact that every resource class is of the same `kind` presents a key issue: The required parameters for a resource class may vary widely, and they are currently only provided as an arbitrary map that is eventually read by the controller for the the specified `provisioner`. Therefore, an administrator who is creating resource classes does not know what fields are required and will not be notified of missing or extraneous fields until the provisioning of a resource that references the class. +This model is powerful because it allows an application developer to create a resource claim without having to know the implementation details or even the underlying provider. 
However, the fact that every resource class is of the same `kind` presents a key issue: The required parameters for a resource class may vary widely, and they are currently only provided as an arbitrary map that is eventually read by the controller for the specified `provisioner`. Therefore, an administrator who is creating resource classes does not know what fields are required and will not be notified of missing or extraneous fields until the provisioning of a resource that references the class. The `parameters` supplied by the resource class are used to populate the `spec` of the managed resource (i.e. the Kubernetes representation of the external resource) when it is created. For instance, the creation of `mysql-claim`, which references the `standard-mysql` class, is watched by the claim controller for AWS RDS instances. It brings together the information provided in the claim and class to create the `RDSInstance` managed resource. Specifically, it calls the `ConfigureMyRDSInstance()` function. As part of the configuration, the function creates the `spec` of the `RDSInstance` managed resource from the `parameters` of the `ResourceClass`: diff --git a/design/design-doc-composition-validating-webhook.md b/design/design-doc-composition-validating-webhook.md index 41b5ec555..ebf44d9f7 100644 --- a/design/design-doc-composition-validating-webhook.md +++ b/design/design-doc-composition-validating-webhook.md @@ -208,7 +208,7 @@ unnecessary check in both `strict` and `loose` modes. #### Notes -A few additional notes worth highligting or making more explicit w.r.t. the description above: +A few additional notes worth highlighting or making more explicit w.r.t. 
the description above: * We identified 3 increasingly complex types of validation, that we will probably introduce in different phases and PRs: @@ -389,7 +389,7 @@ Ideally, the validation logic should be implemented as much as possible keeping in mind that it should be reusable for the following use-cases too: - linter -- langage server +- language server - future webhooks validating resources resulting in Compositions, e.g. Packages This does not mean that an initial implementation should be structured as a @@ -403,7 +403,7 @@ them. Already covered in the Background section with pros and cons. [original-webhook-pr]: https://github.com/crossplane/crossplane/pull/2919 -[original-webhook-design-doc]: https://github.com/crossplane/crossplane/blob/master/design/design-doc-webhooks.md +[original-webhook-design-doc]: https://github.com/crossplane/crossplane/blob/main/design/design-doc-webhooks.md [original-composition-validation-webhook-issue]: https://github.com/crossplane/crossplane/issues/1476 [vscode-plugin]: https://github.com/upbound/vscode-up [upbound/up]: https://github.com/upbound/up/blob/main/internal/xpkg/snapshot/composition.go#L66 diff --git a/design/design-doc-external-secret-stores.md b/design/design-doc-external-secret-stores.md index 711eb1b2f..b3f5df333 100644 --- a/design/design-doc-external-secret-stores.md +++ b/design/design-doc-external-secret-stores.md @@ -635,6 +635,6 @@ related discussion or issue._ [Vault agent inject template]: https://learn.hashicorp.com/tutorials/vault/kubernetes-sidecar#apply-a-template-to-the-injected-secrets [ArgoCD cluster]: https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#clusters [AWS secret manager]: https://aws.amazon.com/secrets-manager/ -[provider-aws Secret resource]: https://github.com/crossplane/provider-aws/blob/master/examples/secretsmanager/secret.yaml +[provider-aws Secret resource]: https://github.com/crossplane-contrib/provider-aws/blob/master/examples/secretsmanager/secret.yaml 
[GenericSecret]: https://registry.terraform.io/providers/hashicorp/vault/latest/docs/resources/generic_secret [kubernetes-sigs/secrets-store-csi-driver]: https://github.com/kubernetes-sigs/secrets-store-csi-driver diff --git a/design/design-doc-observe-only-resources.md b/design/design-doc-observe-only-resources.md index 04ba953ec..aab2993f6 100644 --- a/design/design-doc-observe-only-resources.md +++ b/design/design-doc-observe-only-resources.md @@ -716,5 +716,5 @@ it as a migration path to Crossplane. [most recent AMI]: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami#most_recent [desired tags]: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/vpc#tags [passing sensitive configuration]: https://github.com/crossplane/crossplane/pull/2886#discussion_r862615416 -[`type: Webhook` composition function]: https://github.com/crossplane/crossplane/blob/master/design/design-doc-composition-functions.md#using-webhooks-to-run-functions +[`type: Webhook` composition function]: https://github.com/crossplane/crossplane/blob/main/design/design-doc-composition-functions.md#using-webhooks-to-run-functions [ignore changes]: https://github.com/crossplane/crossplane/blob/ad0ff7d6d0e4850168883905ed8e1509089cea15/design/one-pager-ignore-changes.md \ No newline at end of file diff --git a/design/design-doc-packages-v2.md b/design/design-doc-packages-v2.md index 5a109bf9b..e8268302b 100644 --- a/design/design-doc-packages-v2.md +++ b/design/design-doc-packages-v2.md @@ -11,21 +11,21 @@ there are many other packaging formats in the Kubernetes ecosystem, Crossplane supports its own for the following reasons: - Crossplane is [opinionated about the capabilities of a - controller](https://github.com/crossplane/crossplane/blob/master/design/one-pager-packages-security-isolation.md#allowed-resource-access) + 
controller](https://github.com/crossplane/crossplane/blob/main/design/one-pager-packages-security-isolation.md#allowed-resource-access) that can be installed to extend its functionality. For instance, controllers [may not run as - root](https://github.com/crossplane/crossplane/blob/master/design/one-pager-packages-security-isolation.md#package-deployment-privileges) + root](https://github.com/crossplane/crossplane/blob/main/design/one-pager-packages-security-isolation.md#package-deployment-privileges) or request cluster admin RBAC. - Crossplane [allocates and aggregates various - ClusterRoles](https://github.com/crossplane/crossplane/blob/master/design/one-pager-packages-security-isolation.md#crossplane-clusterroles--rbac) + ClusterRoles](https://github.com/crossplane/crossplane/blob/main/design/one-pager-packages-security-isolation.md#crossplane-clusterroles--rbac) to automatically provide permissions for users in the Kubernetes cluster to view / edit / create / delete CRDs installed by a package. - Crossplane guards against conflicting CRDs being installed into a cluster. - Crossplane adds [additional metadata to - CRDs](https://github.com/crossplane/crossplane/blob/master/design/one-pager-stack-ui-metadata.md#crd-annotation-example) + CRDs](https://github.com/crossplane/crossplane/blob/main/design/one-pager-stack-ui-metadata.md#crd-annotation-example) to provide additional context for displaying their configuration in a UI. - Crossplane [adds labels to - CRDs](https://github.com/crossplane/crossplane/blob/master/design/one-pager-stack-relationship-labels.md#example-wordpress-crdyaml-parented-by-stackinstall) + CRDs](https://github.com/crossplane/crossplane/blob/main/design/one-pager-stack-relationship-labels.md#example-wordpress-crdyaml-parented-by-stackinstall) in an attempt to establish parent-child relationships between CRDs. In addition, the following unimplemented features are goals of the Crossplane @@ -56,12 +56,12 @@ effective than desired in practice. 
The current package infrastructure, though well thought out, has become somewhat convoluted and redundant with the introduction of -[composition](https://github.com/crossplane/crossplane/blob/master/design/design-doc-composition.md) +[composition](https://github.com/crossplane/crossplane/blob/main/design/design-doc-composition.md) into the Crossplane ecosystem. Composition solves the following goals originally intended to be addressed by a `PackageInstall` and [template -stacks](https://github.com/crossplane/crossplane/blob/master/design/one-pager-resource-packs.md): +stacks](https://github.com/crossplane/crossplane/blob/main/design/one-pager-resource-packs.md): - Ability to publish infrastructure abstractions to specific namespaces. - The `PackageInstall` allowed packages to install a namespace-scoped CRD and @@ -161,7 +161,7 @@ workflow the package manager uses for installing a `Package`. We will use a [stdout](https://github.com/crossplane/crossplane/blob/a0d139f7cf269599ba916ed15af3fd68ffeabbdf/cmd/crossplane/package/unpack/unpack.go#L53). 4. The _`ClusterPackageInstall` controller_ waits for the `Job` to complete successfully before reading the logs from the `Pod`. When the `Job` is - complete, it reads the the logs and creates all of the objects that were + complete, it reads the logs and creates all of the objects that were printed, making a [few modifications as well as annotating and labelling appropriately](https://github.com/crossplane/crossplane/blob/6fc50822fbf11a7d31f8a9dabde5c8948c3b36ac/pkg/controller/packages/install/installjob.go#L259). 5. The _`Package` controller_ observes the `Package` creation and assumes the @@ -175,7 +175,7 @@ workflow the package manager uses for installing a `Package`. 
We will use a [syncing](https://github.com/crossplane/crossplane/blob/6fc50822fbf11a7d31f8a9dabde5c8948c3b36ac/pkg/controller/packages/pkg/pkg.go#L696) the `Secret` for the `ServiceAccount` that are required for running the controller in [host aware - mode](https://github.com/crossplane/crossplane/blob/master/design/one-pager-host-aware-stack-manager.md). + mode](https://github.com/crossplane/crossplane/blob/main/design/one-pager-host-aware-stack-manager.md). The process for a `PackageInstall` is very similar, but the packages using the templating controller have the additional step of first producing a @@ -714,7 +714,7 @@ will operate with the following behavior. `spec.desiredState: Active`. 3. Set `status.currentRevision` to full image name used for `PackageRevision` (this can be obtained from the `Pod` in the install `Job`) -4. Report status of `PackageRevision` in the the status of the `Package`. +4. Report status of `PackageRevision` in the status of the `Package`. **`Package` Created with `spec.revisionActivationPolicy: Manual`** @@ -723,7 +723,7 @@ will operate with the following behavior. `spec.desiredState: Inactive`. 3. Set `status.currentRevision` to full image name used for `PackageRevision` (this can be obtained from the `Pod` in the install `Job`) -4. Report status of `PackageRevision` in the the status of the `Package`. +4. Report status of `PackageRevision` in the status of the `Package`. User is responsible for manually setting the `PackageRevision` to `Active`. diff --git a/design/design-doc-provider-strategy.md b/design/design-doc-provider-strategy.md index 35735f7ff..256737162 100644 --- a/design/design-doc-provider-strategy.md +++ b/design/design-doc-provider-strategy.md @@ -472,7 +472,7 @@ From resource definition perspective, each cloud deserves its own summary: * Google TF uses DCL whenever possible already and the GCP API is already resource-based. * There isn't much discrepancy here. 
-* AWS TF seems to have generally been followin CloudFormation which is powered
+* AWS TF seems to have generally been following CloudFormation which is powered
   by Cloud Control.
   * There are exceptions though, so one needs to check Cloud Control Registry.
 * Azure TF uses Azure SDK mostly and Azure API is already resource-based.
diff --git a/design/design-doc-rbac-manager.md b/design/design-doc-rbac-manager.md
index c5df91fb5..59f5669b0 100644
--- a/design/design-doc-rbac-manager.md
+++ b/design/design-doc-rbac-manager.md
@@ -20,7 +20,7 @@ Crossplane, as a project, consists of three key building blocks:
 Each provider is a distinct process that is typically deployed as a pod. The API
 extensions and package manager controllers are part of the 'core' Crossplane
 controller manager process. The core controller manager is therefore responsible
-for _extending Crossplane_. Its controllers add and and remove Custom Resource
+for _extending Crossplane_. Its controllers add and remove Custom Resource
 Definitions (CRDs) to and from the API server. The core Crossplane controllers
 define custom resources (CRs) that represent:
diff --git a/design/design-doc-terraform-provider-runtime.md b/design/design-doc-terraform-provider-runtime.md
index 814e84da1..8a4c7991a 100644
--- a/design/design-doc-terraform-provider-runtime.md
+++ b/design/design-doc-terraform-provider-runtime.md
@@ -372,7 +372,7 @@ between:
 1. specifying structured metadata describing references to the code generator
 2. add reference handling callbacks to the `plugin.Implementation` scheme.
    These could plug into the `Reconciler` via the
-   [ReferenceResolver](https://github.com/crossplane/crossplane-runtime/blob/master/pkg/reconciler/managed/reconciler.go#L143)
+   [ReferenceResolver](https://github.com/crossplane/crossplane-runtime/blob/main/pkg/reconciler/managed/reconciler.go#L143)
    interface.
 3.
building a reference implementation that does not need to know how resources can refer to each other diff --git a/design/design-doc-terrajet.md b/design/design-doc-terrajet.md index acaf33e13..2f5a1e0d7 100644 --- a/design/design-doc-terrajet.md +++ b/design/design-doc-terrajet.md @@ -553,9 +553,9 @@ license restrictions. [resolve-references-example]: https://github.com/crossplane/provider-aws/blob/c269977/apis/apigatewayv2/v1alpha1/referencers.go#L30 [resolve-references]: https://github.com/crossplane/crossplane-runtime/blob/f2440d9/pkg/reference/reference.go#L105 [dcl]: https://github.com/GoogleCloudPlatform/declarative-resource-client-library/blob/338dce1/services/google/compute/firewall_policy_rule.go#L321 -[ack-codegen]: https://github.com/crossplane/provider-aws/blob/master/CODE_GENERATION.md +[ack-codegen]: https://github.com/crossplane-contrib/provider-aws/blob/master/CODE_GENERATION.md [crossplane-tools]: https://github.com/crossplane/crossplane-tools/ -[ack-guide]: https://github.com/crossplane/provider-aws/blob/master/CODE_GENERATION.md +[ack-guide]: https://github.com/crossplane-contrib/provider-aws/blob/master/CODE_GENERATION.md [secret-key-selector]: https://github.com/crossplane/crossplane-runtime/blob/36fc69eff96ecb5856f156fec077ed3f3c3b30b1/apis/common/v1/resource.go#L72 [instance-state]: https://github.com/hashicorp/terraform-plugin-sdk/blob/0e34772/helper/schema/resource.go#L859 [resource-data]: https://github.com/hashicorp/terraform-plugin-sdk/blob/0e34772dad547d6b69148f57d95b324af9929542/helper/schema/resource_data.go#L22 diff --git a/design/one-pager-build-with-earthly.md b/design/one-pager-build-with-earthly.md new file mode 100644 index 000000000..396045add --- /dev/null +++ b/design/one-pager-build-with-earthly.md @@ -0,0 +1,249 @@ +# Build with Earthly + +* Owner: Nic Cope (@negz) +* Status: Proposed + +## Background + +Crossplane uses a `Makefile` with several targets, like `make build`, to +automate tasks that developers frequently need 
to run when developing
+Crossplane.
+
+Crossplane also uses GitHub Actions for continuous integration (CI), to validate
+pull requests. Most of Crossplane's GitHub Actions workflows run the same Make
+targets. This creates some consistency between local development and CI. For
+example `make test` should have the same result whether run locally or in CI.
+
+The `Makefile` includes a moderate library of other `Makefiles`. These are
+imported from the `build/makelib` directory. The `build` directory is a Git
+submodule. Its source is https://github.com/crossplane/build. Most maintainers
+call it "the build submodule".
+
+Crossplane uses the build submodule to:
+
+- Install pinned versions of common tools (`helm`, `kubectl`, etc)
+- Cross-compile Crossplane for several platforms
+- Produce a multi-platform OCI image for Crossplane
+- Run code generation - e.g. `go generate`
+- Validate code by running linters, unit tests, and end-to-end (E2E) tests
+- Automatically derive the semantic version of artifacts from git tags
+- Publish OCI image artifacts to OCI registries
+- Publish binary and Helm chart artifacts to AWS S3
+- Promote artifacts to different distribution channels (i.e. tags, S3 dirs)
+
+The build submodule is also used by Crossplane extensions, like Providers.
+Providers use the build submodule to do more than core Crossplane - for example
+they use it to spin up `kind` clusters and deploy Crossplane for testing.
+
+In the 5+ years I've been a Crossplane maintainer, almost every new maintainer
+(including myself) has expressed a dislike for the build submodule and a desire
+to change build tooling.
+
+I believe folks dislike the build submodule because:
+
+- Make, as a language, has a high learning curve
+- Few people have prior experience with advanced use of Make
+- Needing to update a shared git submodule slows down changes to build logic
+
+It's worth noting that builds using the submodule aren't fully hermetic.
It +strives to be hermetic: for example it uses pinned versions of tools like `helm` +and uses a per-project Go module cache. However it doesn't manage the Go +toolchain, and uses the global Go build cache. I've never heard anyone complain +about this, but it's an area that could be improved. + +## Proposal + +I proposed we switch from Make to https://earthly.dev. + +Earthly targets the 'glue' layer between language-specific tools like `go` and +CI systems like GitHub Actions. In Crossplane, Earthly would replace Make and +Docker. It's based on Docker's [BuildKit][buildkit], so all builds are +containerized and hermetic. + +### Configuration + +The Earthly equivalent of a `Makefile` is an `Earthfile`. An `Earthfile` is a +lot like a `Dockerfile`, but with Make-like targets: + +```Dockerfile +VERSION 0.8 +FROM golang:1.22 +WORKDIR /go-workdir + +deps: + COPY go.mod go.sum ./ + RUN go mod download + # Output these back in case go mod download changes them. + SAVE ARTIFACT go.mod AS LOCAL go.mod + SAVE ARTIFACT go.sum AS LOCAL go.sum + +build: + FROM +deps + COPY main.go . + RUN go build -o output/example main.go + SAVE ARTIFACT output/example AS LOCAL local-output/go-example + +docker: + COPY +build/example . + ENTRYPOINT ["/go-workdir/example"] + SAVE IMAGE go-example:latest +``` + +You'd run `earthly +docker` to build the Docker target in this example. + +At first glance Earthly looks very similar to a multi-stage Dockerfile. There's +a lot of overlap, but Earthly has a bunch of extra functionality that's useful +for a general purpose build tool, including: + +* Invoking other Dockerized things ([`WITH DOCKER`][earthfile-with-docker]) - + e.g. Crossplane's E2E tests +* Exporting files that changed in the build + ([`SAVE ARTIFACT AS LOCAL`][earthfile-save-artifact]) +* Targets that are simply aliases for a bunch of other targets. +* The ability to import Earthfiles from other repos without a submodule + ([`IMPORT`][earthfile-import]). 
+ +I feel Earthly's key strength is its Dockerfile-like syntax. Before writing this +one-pager I ported 90% of Crossplane's build from Make to Earthly. I found it +much easier to pick up and iterate on than the build submodule. + +### Performance + +Earthly is as fast as Make when developing locally, but a little slower in CI. +CI is slower because the Go build cache doesn't persist across CI runs. + +Here are a few local development comparisons using a Linux VM with 6 Apple M1 +Max vCPUs and 20GiB of memory. + +| Task | Make | Earthly | +| --- | --- | --- | +| Build with a cold cache | ~46 seconds | ~60 seconds | +| Build with a hot cache (no changes) | ~2 seconds | ~1 second | +| Build with a hot cache (one Go file changed) | ~8 seconds | ~8 seconds | +| Build for all platforms with a cold cache | ~4 minutes 10 seconds | ~4 minutes 40 seconds | +| Build for all platforms with a hot cache (one Go file changed) | ~42 seconds | ~32 seconds | + +Here are some CI comparisons run on GitHub Actions standard workers. + +| Task | Make | Earthly | +| --- | --- | --- | +| Run linters | ~3 minutes | ~4 minutes | +| Run unit tests | ~3 minutes | ~2.5 minutes | +| Publish artifacts | ~12 minutes | ~14 minutes | +| Run E2E tests | ~12 minutes | ~14 minutes | + +Earthly uses caching to run containerized builds as fast as Make's "native" +builds. For Crossplane this primarily means two things: + +* It caches Go modules, and will only redownload them if `go.mod` changes. +* It stores the Go build cache in a cache volume that's reused across builds. + +This caching requires the BuildKit cache to persist across runs. The BuildKit +cache doesn't persist across GitHub Actions runs, because every job runs in a +clean runner environment. + +Crossplane's Make based GitHub actions use the [cache] GitHub Action to save the +Go module cache and build cache after each run, and load it before the next. 
+There's no good way to do this in Earthly today, per
+https://github.com/earthly/earthly/issues/1540.
+
+Earthly's recommended approach to caching in CI is to use their Earthly
+Satellite remote runners, or host your own remote BuildKit that persists across
+runs. Neither are good fits for Crossplane. Satellites are a paid product, and
+hosting BuildKit would mean paying for and operating build infrastructure.
+
+Earthly supports 'remote caching' of build layers in an OCI registry, but this
+doesn't include `CACHE` volumes (i.e. the Go build cache). Typically CI is
+configured to push the cache using the `--max-remote-cache` flag on main builds,
+then PR builds use the `--remote-cache` flag to load the cache.
+
+My testing indicates remote caching would have little impact for our builds. For
+example building Crossplane for all platforms, with one changed Go file, a cold
+local cache, and a hot remote cache was only a second faster than building with
+a cold cache. This is because the difference is mostly whether Go modules are
+downloaded from the Go module proxy via `go mod download` or downloaded from an
+OCI registry as a cached layer. It's possible GitHub Actions caching to GitHub
+Container Registry would have a more significant impact on build times.
+
+## Risks
+
+Earthly is an early product, currently at v0.8.11. In my testing it's been
+mostly stable, though I've had to restart BuildKit a small handful of times due
+to errors like https://github.com/earthly/earthly/issues/2454.
+
+Earthly also appears to be owned and primarily staffed by a single vendor, who
+presumably would like to build a business around it. This could create conflicts
+of interest - for example Earthly probably isn't incentivised to make CI caching
+better given they're selling a CI caching solution (Satellites). It's worth
+noting that Earthly switched _from_ BSL to MPL already.
+
+## Alternatives Considered
+
+I considered the following alternatives.
+ +### Make, but simpler + +Make isn't so bad when you only have a small handful of really simple targets. +In theory, this could be a nice alternative - strip everything down into one +streamlined `Makefile`. + +Unfortunately I don't think there's much in `makelib` that we can eliminate to +achieve this. The functionality (pinning tools, building for multiple platforms, +etc) has to be implemented somewhere. + +### Multistage docker builds + +This is the closest alternative to Earthly. It has the notable advantage that +Docker is able to leverage bind mounts and/or [native GitHub Actions cache +support][docker-actions-cache] to cache the Go build cache across runs. + +The main reason to avoid this route is that Docker doesn't make a great general +purpose build tool. For example there's no great way to invoke our (`kind` +based) E2E tests, or even output build artifacts. Earthly makes this point +pretty well in [this article][earthly-repeatable-builds]. + +### Dagger + +[Dagger][dagger] is architecturally similar to Earthly, in that it's built on +BuildKit and all builds are containerized. It differs significantly in how you +configure your build. + +In Dagger, you install one or more Dagger Functions. You then invoke these +Functions via the `dagger` CLI. There's no equivalent of a `Makefile` or an +`Earthfile` - if you need to string multiple functions together you write a new +function that calls them, and call that function. + +The result is you end up defining your build logic in a language like Go, for +example: + +* https://docs.dagger.io/quickstart/822194/daggerize +* https://docs.dagger.io/quickstart/428201/custom-function + +I could see this becoming useful if our build logic became _really_ complex, but +for our current use cases I prefer the simpler `Earthfile` syntax. + +### Bazel and friends + +[Bazel][bazel] and similar Google-Blaze-inspired tools like Pants and Buck focus +on fast, correct builds. 
They're especially well suited to large monorepos using +multiple languages, where building the entire monorepo for every change isn't +feasible. Bazel uses `BUILD` files with rules written in Starlark, a Pythonic +language. + +Bazel doesn't wrap tools like `go`, it completely replaces them. It's not +compatible with Go modules for example, and instead offers tools like `gazelle` +to generate a `BUILD` file from a module-based third party dependency. + +Bazel has a pretty large learning curve and tends to require a lot of care and +feeding to keep its `BUILD` files up-to-date. I don't feel it's a great fit for +a medium sized, single language, manyrepo project like Crossplane. + +[buildkit]: https://github.com/moby/buildkit +[earthfile-with-docker]: https://docs.earthly.dev/docs/earthfile#with-docker +[earthfile-save-artifact]: https://docs.earthly.dev/docs/earthfile#save-artifact +[earthfile-import]: https://docs.earthly.dev/docs/earthfile#import +[cache]: https://github.com/actions/cache +[docker-actions-cache]: https://docs.docker.com/build/cache/backends/gha/ +[earthly-repeatable-builds]: https://earthly.dev/blog/repeatable-builds-every-time/ +[dagger]: https://dagger.io +[bazel]: https://bazel.build \ No newline at end of file diff --git a/design/one-pager-composition-environment.md b/design/one-pager-composition-environment.md index e4ffa9736..082a0278b 100644 --- a/design/one-pager-composition-environment.md +++ b/design/one-pager-composition-environment.md @@ -248,7 +248,7 @@ provider is responsible for implementing and supporting this feature. See https://github.com/crossplane/crossplane/issues/1770 for details. However, the security issues mentioned in [Patch from any Object](#patch-from-any-object) -would occure here as well. +would occur here as well. One could potentially use a managed resource to extract data from a secret within another namespace. 
diff --git a/design/one-pager-cross-resource-referencing.md b/design/one-pager-cross-resource-referencing.md index 9d6967936..34d40f177 100644 --- a/design/one-pager-cross-resource-referencing.md +++ b/design/one-pager-cross-resource-referencing.md @@ -281,15 +281,15 @@ dependent objects are deleted first. * [Honoring inter-resource dependency when creating/deleting resources](https://github.com/crossplane/crossplane/issues/708) * [Resource - Connectivity](https://github.com/crossplane/crossplane/blob/master/design/one-pager-resource-connectivity-mvp.md) + Connectivity](https://github.com/crossplane/crossplane/blob/main/design/one-pager-resource-connectivity-mvp.md) [gitops doc]: (https://www.weave.works/blog/what-is-gitops-really) [Subnet type]: -(https://github.com/crossplane/provider-aws/blob/master/apis/network/v1alpha2/subnet_types.go#L25-L37) +(https://github.com/crossplane-contrib/provider-aws/blob/master/apis/network/v1alpha2/subnet_types.go#L25-L37) [Subnetwork type]: -(https://github.com/crossplane/provider-gcp/blob/master/apis/compute/v1alpha2/subnetwork_types.go#L144) +(https://github.com/crossplane-contrib/provider-gcp/blob/master/apis/compute/v1alpha2/subnetwork_types.go#L144) [Managed Reconciler]: -https://github.com/crossplane/crossplane-runtime/blob/master/pkg/reconciler/managed/reconciler.go +https://github.com/crossplane/crossplane-runtime/blob/main/pkg/reconciler/managed/reconciler.go [Foreground cascading deletion]: (https://kubernetes.io/docs/concepts/workloads/controllers/garbage-collection/#foreground-cascading-deletion) diff --git a/design/one-pager-fn-claim-conditions.md b/design/one-pager-fn-claim-conditions.md new file mode 100644 index 000000000..e6a5c5644 --- /dev/null +++ b/design/one-pager-fn-claim-conditions.md @@ -0,0 +1,272 @@ +# Communication Between Composition Functions and the Claim + +* Owner: Dalton Hill (@dalton-hill-0) +* Reviewers: Nic Cope (@negz) +* Status: Draft + +## Background + +### Desired Behavior +Composition 
Function authors should be able to communicate and translate
+the underlying status with users.
+
+#### Managed Resource Status
+We think authors often won't want to surface the status as it appears on an MR,
+but will probably want to derive more user-friendly messages from it. Messages
+that are more meaningful to folks reading claims.
+
+Some examples include:
+- The external system for an MR is unreachable.
+- The MR is incorrectly configured.
+- The MR is being created, updated, etc.
+
+#### Internal Errors
+We think authors may want to have a catch-all Internal Error
+message. Authors should be able to display the real error on the XR and provide
+a basic "Internal Error" message on the Claim.
+
+Currently internal errors often leave the Claim in a "Waiting" state. It would
+be nice to notify the user that an internal error was encountered, and that the
+team has been notified by an alert.
+
+### Existing Behavior
+
+#### Function Results
+Currently functions can return Results. Depending on the type of results seen,
+you can observe the following behavior on the Composite Resource.
+
+Fatal Result:
+- Synced status condition is set to False, contains result's message.
+- Warning Event generated (reason: ReconcileError), containing result's message.
+
+Warning Result:
+- Warning Event (reason: ComposeResources) generated, containing result's
+  message.
+
+Normal Result:
+- Normal Event (reason: ComposeResources) generated, containing result's
+  message.
+
+
+#### Setting the Claim's Status
+Currently the only path to communicate a custom message with the user is by
+defining your own field in the Claim's status.
+For example, we can define an XRD with:
+```yaml
+status:
+  someCommunicationField:
+    - msg: "Something went wrong."
+```
+
+There are a couple issues with this solution.
+- If we need to halt resource reconciliation due to a fatal error, we can do so + with the [SDK](https://github.com/crossplane/function-sdk-go)'s + `response.Fatal`, however, this does not also allow us to update the XR and + Claim for communication with the user. +- There is an existing field that would be more intuitive to use as it is + already performing this same task for Crossplane itself (`status.conditions`). + +#### Setting the Composite's Status Conditions +Currently you can update the Composite's status conditions by setting them with +SetDesiredCompositeResource. +There are a couple of limitations to this: +- it only shows up on the XR +- it only shows up if there are no fatal results + +Example of setting the Composite's status conditions. +```go +// includes: +// corev1 "k8s.io/api/core/v1" +// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +// xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +// +// "github.com/crossplane/function-sdk-go/response" +desiredXR, err := request.GetDesiredCompositeResource(req) +c := xpv1.Condition{ + Type: xpv1.ConditionType("ImageReady"), + Status: corev1.ConditionFalse, + LastTransitionTime: metav1.Now(), + Reason: "NotFound", + Message: "The image provided does not exist or you are not "+ + "authorized to use it.", +} +desiredXR.Resource.SetConditions(c) +response.SetDesiredCompositeResource(rsp, desiredXR) +``` + +## Proposal +We would like to allow the Composition Function author to: +- Choose where results go (Claim or XR) +- Allow results to update the Status Conditions of the XR and Claim + +The following sections get into the details of each of the above items. + +### Choose Where Results Go +Currently each result returned by a function will create a corresponding +event on the XR (if no previous fatal result exists). + +We can expand this functionality by allowing the Result to have targets. In +order to accomplish this, we will need to expand the Result API as follows. 
+```protobuf +message Result { + // Omitted for brevity + + Target target = 3; +} + +// Target of Function results. +enum Target { + TARGET_UNSPECIFIED = 0; + TARGET_COMPOSITE_ONLY = 1; + TARGET_COMPOSITE_AND_CLAIM = 2; +} +``` +The reason for having `TARGET_COMPOSITE_AND_CLAIM` and not `TARGET_CLAIM` is an +implementation limitation. This prevents more involved API changes, and this +is also consistent with existing behavior (func copies to XR, Crossplane copies +XR to Claim). + +The following is an example of how a function author could use this behavior. +Note that this is just a sketch and may not be the final API. +```go +// import "github.com/crossplane/function-sdk-go/response" +response.Fatal(rsp, errors.New("The image provided does not exist or you are not authorized to use it.")). + ConditionFalse("ImageReady", "NotFound"). + TargetCompositeAndClaim() +``` + +To support this behavior, the status of the Composite would need an additional +field `claimConditions`. This field will contain the types of conditions that +should be propagated to the Claim. +```yaml +# composite status +status: + # The XR's condition types that should be back-propagated to the claim + claimConditions: [DatabaseReady, ImageReady] + # The XR's conditions + conditions: + - type: DatabaseReady + status: True + reason: Available + - type: ImageReady + status: False + reason: NotFound + message: The image provided does not exist or you are not authorized to use it. +``` + +### Allow Results to Set a Condition +We would like the function author to be able to set the Claim's status +conditions. This would allow the function author to clearly communicate the +state of the Claim with their users. + +To allow the setting of conditions in the result, we will need to expand the +Result API as follows. +```protobuf +message Result { + // Omitted for brevity + + // Optionally update the supplied status condition on all targets. + // The result's reason and message will be used in the condition. 
+ optional Condition condition = 4; +} + +message Condition { + // Type of the condition, e.g. DatabaseReady. + // 'Ready' and 'Synced' are reserved for use by Crossplane. + string type = 1; + + // Status of the condition. + Status status = 2; + + // Machine-readable PascalCase reason. + string reason = 3; +} +``` + +An example of a function utilizing this new ability: +```go +// rb "github.com/crossplane/function-sdk-go/response/result/builder" +// const databaseReady = "DatabaseReady" +// const reasonUnauthorized = "Unauthorized" +// var messageUnauthorized = errors.New("You are unauthorized to access this resource.") +result := rb.Fatal(messageUnauthorized). + TargetCompositeAndClaim(). + WithConditionFalse(databaseReady, reasonUnauthorized). + Build() +response.AddResult(rsp, result) +``` + +## Advanced Usage Example +Lets say we are a team of platform engineers who have a Crossplane offering. +For each Claim, we wish to expose a set of conditions that users can expect to +exist which provide: +- the current status of the underlying resources +- any steps required by the user to remediate an issue + +Lets say we have a claim that does the following.. +1. Accepts an identifier to an existing database +1. Accepts an image to deploy +1. Configures a deployment that uses the image provided and is authenticated to +the database. + +### Scenarios +Given a few different scenarios, users could expect to see the following +`status.conditions` for the claim. + +#### Image Not Found +First we found the database and determined that the user has authorization, +however, the image they provided was not found. + +An example of the Claim's status: +```yaml +status: + conditions: + - type: DatabaseReady + status: True + reason: Available + - type: ImageReady + status: False + reason: NotFound + message: The image provided does not exist or you are not authorized to use + it. +``` +#### Progressing +All is fine and the application is progressing but not yet fully online. 
+ +An example of the Claim's status: +```yaml +status: + conditions: + - type: DatabaseReady + status: True + reason: Available + - type: ImageReady + status: True + reason: Available + - type: AppReady + status: False + reason: Creating + message: Waiting for the deployment to be available. +``` + +#### Success +Once everything is online and running smoothly, users should see something like +this. + +An example of the Claim's status: +```yaml +status: + conditions: + - type: DatabaseReady + status: True + reason: Available + - type: ImageReady + status: True + reason: Available + - type: AppReady + status: True + reason: Available +``` + +## Further Reading +- [k8s typical status properties](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties) diff --git a/design/one-pager-helm-provider.md b/design/one-pager-helm-provider.md index e4072681c..a17afee7c 100644 --- a/design/one-pager-helm-provider.md +++ b/design/one-pager-helm-provider.md @@ -26,8 +26,8 @@ clients/libraries for helm), **we will focus and only support Helm 3**. We will implement a Kubernetes controller watching `Release` custom resources and deploying helm charts with the desired configuration. Since this controller needs to interact with Kubernetes API server, it is a good fit for [Kubernetes -native providers](https://github.com/crossplane/crossplane/blob/master/design/one-pager-k8s-native-providers.md#kubernetes-native-providers) -concept in Crossplane. By using existing [Kubernetes Provider](https://github.com/crossplane/crossplane/blob/master/design/one-pager-k8s-native-providers.md#proposal-kubernetes-provider-kind) +native providers](https://github.com/crossplane/crossplane/blob/main/design/one-pager-k8s-native-providers.md#kubernetes-native-providers) +concept in Crossplane. 
By using existing [Kubernetes Provider](https://github.com/crossplane/crossplane/blob/main/design/one-pager-k8s-native-providers.md#proposal-kubernetes-provider-kind) Kind, we will be able to manage helm releases in **Crossplane provisioned external clusters**, **existing external clusters** and also **Crossplane control cluster** (a.k.a. local cluster). diff --git a/design/one-pager-ignore-changes.md b/design/one-pager-ignore-changes.md index d0eccc0ca..c5c59f45a 100644 --- a/design/one-pager-ignore-changes.md +++ b/design/one-pager-ignore-changes.md @@ -58,7 +58,7 @@ for ignoring some managed resource parameters during updates. ## Proposal -Proposed solution is to rework the new [managementPolicy] feature which came +Proposed solution is to rework the new [managementPolicies] feature which came with the [ObserveOnly] feature and transform it into a set of enum values representing what Crossplane should do with the managed resource. @@ -77,44 +77,44 @@ This will allow users to fine-tune how Crossplane manages the external resource, in a manner which is very explicit and easy to understand. 
Some examples on how the management policies would work and how they would -replace the current `managementPolicy` and `deletionPolicy`: +replace the current `managementPolicies` and `deletionPolicy`: ```yaml # Default spec: - managementPolicy: FullControl + managementPolicies: FullControl deletionPolicy: Delete # would be replaced with: spec: - managementPolicy: ["Create", "Update", "Delete", "Observe", "LateInitialize"] + managementPolicies: ["Create", "Update", "Delete", "Observe", "LateInitialize"] # or - managementPolicy: ["*"] + managementPolicies: ["*"] # ObserveOnly spec: - managmentPolicy: ObserveOnly + managementPolicies: ObserveOnly # would be replaced with: spec: - managementPolicy: ["Observe"] + managementPolicies: ["Observe"] # OrphanOnDelete spec: - managementPolicy: OrphanOnDelete + managementPolicies: OrphanOnDelete # would be replaced with: spec: - managementPolicy: ["Create", "Update", "Observe", "LateInitialize"] + managementPolicies: ["Create", "Update", "Observe", "LateInitialize"] -# pause can be achieved by setting managementPolicy to empty list instead of +# pause can be achieved by setting managementPolicies to empty list instead of # using the annotation spec: - managementPolicy: [] + managementPolicies: [] # Turn off late initialization spec: - managementPolicy: ["Create", "Update", "Delete", "Observe"] + managementPolicies: ["Create", "Update", "Delete", "Observe"] ``` In addition to the new management policy, we will also add a new field @@ -144,11 +144,11 @@ For example: policies according to [the ObserveOnly design doc.][ObserveOnly], but still retain some functionality if a non-default value was set. In practice, it meant that if the `deletionPolicy` was set to `Orphan`, and the -`managementPolicy` set to `FullControl`, the external resource would be +`managementPolicies` set to `FullControl`, the external resource would be orphaned. 
In the new design, we could still follow this approach, by orphaning the -resource even if the `managementPolicy` includes `Delete`, if the +resource even if the `managementPolicies` includes `Delete`, if the `deletionPolicy` is set to `Orphan`, until we entirely remove the deletion policy. @@ -170,11 +170,11 @@ future-proof. ### Migrating existing resources -The `managementPolicy` feature is alpha, so it should be ok to break the -API. The combinations of `managementPolicy` and `deletionPolicy` would look -like this in the new `managementPolicy` field. +The `managementPolicies` feature is alpha, so it should be ok to break the +API. The combinations of `managementPolicies` and `deletionPolicy` would look +like this in the new `managementPolicies` field. -| managementPolicy | deletionPolicy | new managementPolicy | +| managementPolicies | deletionPolicy | new managementPolicies | |------------------|----------------|---------------------------------------------------| | FullControl | Delete | ["*"] | | FullControl | Orphan | ["Create", "Update", "Observe", "LateInitialize"] | @@ -184,8 +184,8 @@ like this in the new `managementPolicy` field. | ObserveOnly | Orphan | ["Observe"] | As this will be a breaking change, if users want to keep the old -`managementPolicy` behaviour, we suggest pausing the reconciliation of the MR, -upgrading Crossplane, and then updating the `managementPolicy` to the desired +`managementPolicies` behaviour, we suggest pausing the reconciliation of the MR, +upgrading Crossplane, and then updating the `managementPolicies` to the desired value before unpausing the reconciliation. In reality this is only needed for the `ObserveOnly` and @@ -256,7 +256,7 @@ policy. ```yaml spec: - managementPolicy: ["Create", "Update", "Delete", "Observe"] + managementPolicies: ["Create", "Update", "Delete", "Observe"] forProvider: maxSize: 5 minSize: 1 @@ -273,7 +273,7 @@ would need to be used alongside omitting `LateInitialize` management policy. 
```yaml spec: - managementPolicy: ["Create", "Update", "Delete", "Observe"] + managementPolicies: ["Create", "Update", "Delete", "Observe"] initProvider: readCapacity: 1 writeCapacity: 1 @@ -290,7 +290,7 @@ the autoscaler would be able to control the `desiredSize` after creation. ```yaml spec: - managementPolicy: ["Create", "Update", "Delete", "Observe"] + managementPolicies: ["Create", "Update", "Delete", "Observe"] initProvider: scalingConfig: desiredSize: 1 @@ -308,7 +308,7 @@ Just omitting the `LateInitialize` management policy would be enough as the ```yaml spec: - managementPolicy: ["Create", "Update", "Delete", "Observe"] + managementPolicies: ["Create", "Update", "Delete", "Observe"] forProvider: ... ``` @@ -322,7 +322,7 @@ omitting the `LateInitialize` management policy. Example: ```yaml spec: - managementPolicy: ["Create", "Update", "Delete", "Observe"] + managementPolicies: ["Create", "Update", "Delete", "Observe"] initProvider: members: - user1 @@ -349,7 +349,7 @@ Ref: [Upjet Initialize] or [AWS community provider tag example]. ### PartialControl management policy + initProvider -Proposed solution is to use the new [managementPolicy] field which came with +Proposed solution is to use the new [managementPolicies] field which came with the [ObserveOnly] feature and add a new management policy that will skip late initialization. The loss the information that the late initialization was providing would be offset by the `status.atProvider` @@ -410,7 +410,7 @@ policy. ```yaml spec: - managementPolicy: PartialControl + managementPolicies: PartialControl forProvider: maxSize: 5 minSize: 1 @@ -427,7 +427,7 @@ would need to be used alongside `PartialControl` management policy. ```yaml spec: - managementPolicy: PartialControl + managementPolicies: PartialControl initProvider: readCapacity: 1 writeCapacity: 1 @@ -444,7 +444,7 @@ autoscaler would be able to control the `desiredSize` after creation. 
```yaml spec: - managementPolicy: PartialControl + managementPolicies: PartialControl initProvider: scalingConfig: desiredSize: 1 @@ -468,7 +468,7 @@ Just using the `PartialControl` management policy would be enough as the ```yaml spec: - managementPolicy: PartialControl + managementPolicies: PartialControl forProvider: ... ``` @@ -482,7 +482,7 @@ then ignored on updates would be solved by using `initProvider` alongside Example: ```yaml spec: - managementPolicy: PartialControl + managementPolicies: PartialControl initProvider: members: - user1 @@ -670,7 +670,7 @@ if this issue is not that widespread, we could have an easy fix. [Update section]: https://github.com/crossplane/crossplane-runtime/blob/1316ae6695eec09cf47abdfd0bc6273aeaab1895/pkg/reconciler/managed/reconciler.go#L1061-L1096 [Late Init section]: https://github.com/crossplane/crossplane-runtime/blob/1316ae6695eec09cf47abdfd0bc6273aeaab1895/pkg/reconciler/managed/reconciler.go#L1033-L1046 [Initialize]: https://github.com/crossplane/crossplane-runtime/blob/1316ae6695eec09cf47abdfd0bc6273aeaab1895/pkg/reconciler/managed/reconciler.go#L742 -[managementPolicy]: https://github.com/crossplane/crossplane-runtime/blob/1316ae6695eec09cf47abdfd0bc6273aeaab1895/apis/common/v1/policies.go#L22 +[managementPolicies]: https://github.com/crossplane/crossplane-runtime/blob/229b63d39990935b8130cf838e6488dcba5c085a/apis/common/v1/policies.go#L21 [ObserveOnly]: https://github.com/crossplane/crossplane/blob/019ddb55916396d654e53a86d9acf1cde49aee31/design/design-doc-observe-only-resources.md [ResourceLateInitialize]: https://github.com/crossplane/crossplane-runtime/blob/00239648258e9731c274fb1f879f8255b948c79a/pkg/reconciler/managed/reconciler.go#L1033 [Late Initialization Update]: https://github.com/crossplane/crossplane-runtime/blob/00239648258e9731c274fb1f879f8255b948c79a/pkg/reconciler/managed/reconciler.go#L1033 diff --git a/design/one-pager-k8s-native-providers.md b/design/one-pager-k8s-native-providers.md index 
b4ae23641..379c6632d 100644 --- a/design/one-pager-k8s-native-providers.md +++ b/design/one-pager-k8s-native-providers.md @@ -554,8 +554,8 @@ func (c *ClusterController) SetupWithManager(mgr ctrl.Manager) error { [Crossplane CLI]: https://github.com/crossplane/crossplane-cli [client-go]: https://github.com/kubernetes/client-go [managed reconciler]: https://github.com/crossplane/crossplane/blob/14fa6dda6a3e91d5f1ac98d1020a151b02311cb1/pkg/controller/workload/kubernetes/resource/resource.go#L401 -[claim reconciler]: https://github.com/crossplane/crossplane-runtime/blob/master/pkg/resource/claim_reconciler.go -[scheduler controller]: https://github.com/crossplane/crossplane/blob/master/pkg/controller/workload/kubernetes/scheduler/scheduler.go +[claim reconciler]: https://github.com/crossplane/crossplane-runtime/blob/main/pkg/resource/claim_reconciler.go +[scheduler controller]: https://github.com/crossplane/crossplane/blob/main/pkg/controller/workload/kubernetes/scheduler/scheduler.go [crossplane-runtime]: https://github.com/crossplane/crossplane-runtime [crossplane-runtime #22]: https://github.com/crossplane/crossplane-runtime/issues/22 [crossplane-runtime #34]: https://github.com/crossplane/crossplane-runtime/issues/34 diff --git a/design/one-pager-managed-resource-api-design.md b/design/one-pager-managed-resource-api-design.md index 26d46753a..fedc7a6f2 100644 --- a/design/one-pager-managed-resource-api-design.md +++ b/design/one-pager-managed-resource-api-design.md @@ -706,7 +706,7 @@ adding a field about that sync status and reconciler can mark the sync status in one of the `Condition`s we already have or add a new one. 
[package]: https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/eks -[terminology]: https://github.com/crossplane/crossplane/blob/master/docs/concepts/terminology.md +[terminology]: https://github.com/crossplane/crossplane/blob/main/docs/concepts/terminology.md [from crossplane-runtime]: https://github.com/crossplane/crossplane-runtime/blob/ca4b6b4/apis/core/v1alpha1/resource.go#L77 [Kubernetes API Conventions - Spec and Status]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status [managed reconciler]: https://github.com/crossplane/crossplane-runtime/blob/84e629b9589852df1322ff1eae4c6e7639cf6e99/pkg/reconciler/managed/reconciler.go#L637 diff --git a/design/one-pager-rate-limiting.md b/design/one-pager-rate-limiting.md new file mode 100644 index 000000000..d317e9534 --- /dev/null +++ b/design/one-pager-rate-limiting.md @@ -0,0 +1,151 @@ +# Reconciler Rate Limiting + +* Owner: Nic Cope (@negz) +* Status: Accepted + +> This one pager retroactively documents a past design decision. See +> [issue #2595] for the original proposal. + +## Background + +Crossplane consists of a series of controllers working together. Ultimately, the +job of those controllers is to reconcile desired state with an external system. +The external system might be Amazon Web Services (AWS), GitHub, or a Kubernetes +cluster. + +Crossplane and Crossplane provider reconciles are rate limited. These rate limits +attempt to ensure: + +* The maximum rate at which Crossplane calls the external system is predictable. +* Crossplane doesn't overload the API server, or the external system. +* Crossplane is as performant as possible. + +It's important that the rate at which Crossplane calls the external system is +predictable because some API calls may cost money. It's also important because +API calls are typically rate limited by the external system. 
Users may not want +Crossplane to exhaust those rate limits, for example because it must coexist +with other tools that are also subject to the same rate limits. + +Each Crossplane provider exposes a `--max-reconcile-rate` flag that tunes its +rate limits. This flag allows users to make their own trade off between +increased reconcile throughput and increased external API calls. + +## Controller Runtime Rate Limits + +A controller built using `controller-runtime` v0.17 uses the following defaults. + +### API Server Request Rate + +An API server client that rate limits itself to 20 queries per second (qps), +bursting to 30 queries. This client is shared by all controllers that are part +of the same controller manager (e.g. same provider). See [`config.go`]. + +### Reconcile Rate + +A rate limiter that rate limits reconciles triggered by _only_: + +* A watched object changing. +* A previous reconcile attempt returning an error. +* A previous reconcile attempt returning `reconcile.Result{Requeue: true}`. + +Importantly, a reconcile triggered by a previous reconcile attempt returning +`reconcile.Result{RequeueAfter: t}` is not subject to rate limiting. This means +reconciles triggered by `--poll-interval` are not subject to rate limiting when +using `controller-runtime` defaults. + +When a reconcile is subject to rate limiting, the earliest time the controller +will process it will be the **maximum** of: + +* The enqueue time plus a duration increasing exponentially from 5ms to 1000s + (~16 minutes). +* The enqueue time plus a duration calculated to limit the controller to 10 + requeues per second on average, using a token bucket algorithm. + +The exponential backoff rate limiting is per object (e.g. per managed resource) +while the token bucket rate limiter is per controller (e.g. per _kind of_ +managed resource). + +See [`controller.go`] and [`default_rate_limiters.go`]. + +### Concurrent Reconciles + +Each controller may process at most one reconcile concurrently. 
+ +## Crossplane Rate Limits + +The controller-runtime defaults are not suitable for Crossplane. Crossplane +wants: + +* To wait more than 5ms before requeuing, but less than 16 minutes. +* To reconcile several managed resources of a particular kind at once. +* To rate limit _classes_ of managed resource (e.g. all AWS resources, or all + EC2 resources). + +Crossplane attempts to achieve this by deriving several rate limits from a +single flag - `--max-reconcile-rate`. The default value for this flag is usually +10 reconciles per second. The flag applies to an entire controller manager (e.g. +Crossplane, or a provider). + +Note that provider maintainers must use the functions defined in [`default.go`] +to ensure these rate limits are applied at the client, global, and controller +levels. + +### API Server Request Rate + +An API server client that rate limits itself to `--max-reconcile-rate * 5` qps, +and `--max-reconcile-rate * 10` burst. With a default `--max-reconcile-rate` of +10 this is 50 qps bursting to 100 queries. This client is shared by all +controllers that are part of the same controller manager (e.g. same provider). +See [`default.go`]. + +### Reconcile Rate + +Crossplane uses two layers of rate limiting. + +A global token bucket rate limiter limits all controllers within a provider to +`--max-reconcile-rate` reconciles per second, bursting to +`--max-reconcile-rate * 10`. With a default `--max-reconcile-rate` of 10 this is +10 reconciles per second, bursting to 100. + +All reconciles are subject to the global rate limiter, even those triggered by a +previous reconcile returning `reconcile.Result{RequeueAfter: t}`. + +An exponential backoff rate limiter limits how frequently a particular object +may be reconciled, backing off from 1s to 60s. A reconcile triggered by a +previous reconcile returning `reconcile.Result{RequeueAfter: t}` is not subject +to this rate limiter. 
+ +Due to limitations of controller-runtime (see [issue #857]) the global rate +limiter is implemented as a middleware `Reconciler`. See [`reconciler.go`]. + +Reconciles may be rate limited by both layers. + +Consider a reconcile that was requeued because it returned an error. First it's +subject to the controller's exponential backoff reconciler, which adds the +reconcile to the controller's work queue to be processed from 1 to 60 seconds in +the future. + +When the reconcile is popped from the head of the work queue it's processed by +the middleware `Reconciler`, subject to its token bucket reconciler. If there +are sufficient tokens available in the bucket, the reconcile is passed to the +wrapped (inner) `Reconciler` immediately. If there aren't sufficient tokens +available, the reconcile is returned to the tail of the work queue by returning +`reconcile.Result{RequeueAfter: t}`. + +This results in misleading work queue duration metrics. A reconcile may travel +through the work queue (at most) twice before it's processed. + +### Concurrent Reconciles + +Each controller may process at most `--max-reconcile-rate` reconciles +concurrently. With a default `--max-reconcile-rate` of 10 each controller may +process 10 reconciles concurrently. This means a provider will reconcile at most +10 managed resources of particular kind at once. 
+
[issue #2595]: https://github.com/crossplane/crossplane/issues/2595
[`config.go`]: https://github.com/kubernetes-sigs/controller-runtime/blob/v0.17.2/pkg/client/config/config.go#L96
[`controller.go`]: https://github.com/kubernetes-sigs/controller-runtime/blob/v0.17.2/pkg/internal/controller/controller.go#L316
[`default_rate_limiters.go`]: https://github.com/kubernetes/client-go/blob/v0.29.2/util/workqueue/default_rate_limiters.go#L39
[`default.go`]: https://github.com/crossplane/crossplane-runtime/blob/v1.15.0/pkg/ratelimiter/default.go
[issue #857]: https://github.com/kubernetes-sigs/controller-runtime/issues/857
[`reconciler.go`]: https://github.com/crossplane/crossplane-runtime/blob/v1.15.0/pkg/ratelimiter/reconciler.go#L43 \ No newline at end of file diff --git a/design/one-pager-resource-connectivity-mvp.md b/design/one-pager-resource-connectivity-mvp.md index 8544b922b..c33c199cc 100644 --- a/design/one-pager-resource-connectivity-mvp.md +++ b/design/one-pager-resource-connectivity-mvp.md @@ -253,7 +253,7 @@ spec: By comparison, a direct translation of the [GKE cluster external resource]'s writable API object fields to a Kubernetes YAML specification would be as follows. Note that the GKE API contains several deprecated fields, all of which -are superceded by others (e.g. `nodeConfig` is superceded by `nodePools`). The +are superseded by others (e.g. `nodeConfig` is superseded by `nodePools`). The below translation omits these deprecated fields. ```yaml @@ -688,7 +688,7 @@ resources need to be created beforehand: and associating it with a set of subnets. In addition, `RDSInstance`s also need the following resources, so that they are -accessible by the the worker nodes: +accessible by the worker nodes: * `DBSubnetGroup`: represents a group of `Subnet`s from different availability zones, @@ -964,7 +964,7 @@ ensure connectivity. 
We will need to wait until *after* the Wordspress stack is installed to create the VNet Rule on the MySQL DB due to the fact that the database will not exist -until the the stack references our `SQLServerClass` with a claim. +until the stack references our `SQLServerClass` with a claim. #### A Model for Deploying Wordpress diff --git a/design/proposal-controller-code-generation.md b/design/proposal-controller-code-generation.md index ee0ec014f..4a8d3159d 100644 --- a/design/proposal-controller-code-generation.md +++ b/design/proposal-controller-code-generation.md @@ -326,7 +326,7 @@ other supported languages inc javascript, python and .NET. [Magic Moduels]: https://github.com/GoogleCloudPlatform/magic-modules [OpenAPI Directory]: https://github.com/APIs-guru/openapi-directory [Pulumi]: https://github.com/pulumi/pulumi -[Reference Resolvers]: https://github.com/crossplane/crossplane/blob/master/design/one-pager-cross-resource-referencing.md +[Reference Resolvers]: https://github.com/crossplane/crossplane/blob/main/design/one-pager-cross-resource-referencing.md [alibaba-ros]: https://www.alibabacloud.com/product/ros [amazon-aso]: https://github.com/aws/aws-service-operator-k8s [aso-v2-architecture]: https://github.com/jaypipes/aws-service-operator-k8s/blob/91e63414efb00564662adf6eaafc20e124a3b2d3/docs/code-generation.md diff --git a/go.mod b/go.mod index 8e0caef7e..1dda313ba 100644 --- a/go.mod +++ b/go.mod @@ -1,61 +1,59 @@ module github.com/crossplane/crossplane -go 1.21 +go 1.22.3 -toolchain go1.21.6 +toolchain go1.22.5 require ( dario.cat/mergo v1.0.0 github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 github.com/Masterminds/semver v1.5.0 - github.com/alecthomas/kong v0.8.1 - github.com/bufbuild/buf v1.27.2 - github.com/crossplane/crossplane-runtime v1.16.0-rc.1 - github.com/docker/docker v25.0.2+incompatible + github.com/alecthomas/kong v0.9.0 + github.com/crossplane/crossplane-runtime v1.18.0-rc.0 + github.com/docker/docker 
v25.0.6+incompatible github.com/docker/go-connections v0.5.0 - github.com/emicklei/dot v1.6.1 + github.com/emicklei/dot v1.6.2 github.com/go-git/go-billy/v5 v5.5.0 - github.com/go-git/go-git/v5 v5.11.0 - github.com/golang-jwt/jwt/v5 v5.2.0 + github.com/go-git/go-git/v5 v5.12.0 + github.com/golang-jwt/jwt/v5 v5.2.1 github.com/google/go-cmp v0.6.0 - github.com/google/go-containerregistry v0.19.0 + github.com/google/go-containerregistry v0.19.1 github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20230919002926-dbcd01c402b2 github.com/jmattheis/goverter v1.3.2 github.com/pkg/errors v0.9.1 github.com/sirupsen/logrus v1.9.3 github.com/spf13/afero v1.11.0 github.com/upbound/up-sdk-go v0.1.1-0.20240122203953-2d00664aab8e - golang.org/x/sync v0.6.0 - google.golang.org/grpc v1.61.0 + golang.org/x/sync v0.7.0 + google.golang.org/grpc v1.63.2 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 - google.golang.org/protobuf v1.31.0 - k8s.io/api v0.29.1 - k8s.io/apiextensions-apiserver v0.29.1 - k8s.io/apimachinery v0.29.1 - k8s.io/apiserver v0.29.1 + google.golang.org/protobuf v1.34.2 + k8s.io/api v0.30.1 + k8s.io/apiextensions-apiserver v0.30.0 + k8s.io/apimachinery v0.30.1 + k8s.io/apiserver v0.30.0 k8s.io/cli-runtime v0.29.1 - k8s.io/client-go v0.29.1 - k8s.io/code-generator v0.29.1 + k8s.io/client-go v0.30.1 + k8s.io/code-generator v0.30.0 k8s.io/kubectl v0.29.1 k8s.io/metrics v0.29.1 k8s.io/utils v0.0.0-20230726121419-3b25d923346b - sigs.k8s.io/controller-runtime v0.17.0 + sigs.k8s.io/controller-runtime v0.18.2 sigs.k8s.io/controller-tools v0.14.0 - sigs.k8s.io/e2e-framework v0.3.0 + sigs.k8s.io/e2e-framework v0.4.0 sigs.k8s.io/kind v0.20.0 sigs.k8s.io/yaml v1.4.0 ) require ( - connectrpc.com/connect v1.13.0 // indirect - connectrpc.com/otelconnect v0.6.0 // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect - github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect + github.com/ProtonMail/go-crypto v1.0.0 // indirect 
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230512164433-5d1fd1a340c9 // indirect github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect github.com/cloudflare/circl v1.3.7 // indirect + github.com/containerd/log v0.1.0 // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/distribution/reference v0.5.0 // indirect github.com/emirpasic/gods v1.18.1 // indirect @@ -65,37 +63,38 @@ require ( github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/google/btree v1.0.1 // indirect - github.com/google/cel-go v0.17.7 // indirect + github.com/google/cel-go v0.17.8 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect - github.com/jdx/go-netrc v1.0.0 // indirect github.com/kevinburke/ssh_config v1.2.0 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect - github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pjbgf/sha1cd v0.3.0 // indirect - github.com/sergi/go-diff v1.1.0 // indirect - github.com/skeema/knownhosts v1.2.1 // indirect + github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect + github.com/skeema/knownhosts v1.2.2 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect github.com/xanzy/ssh-agent v0.3.3 
// indirect github.com/xlab/treeprint v1.2.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0 // indirect + go.opentelemetry.io/otel/sdk v1.26.0 // indirect go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect - golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 // indirect + golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect gopkg.in/warnings.v0 v0.1.2 // indirect + k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect ) require ( - cloud.google.com/go/compute v1.23.3 // indirect + cloud.google.com/go/compute v1.24.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect @@ -121,50 +120,43 @@ require ( github.com/aws/aws-sdk-go-v2/service/sso v1.12.10 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.10 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.19.0 // indirect - github.com/aws/smithy-go v1.19.0 + github.com/aws/smithy-go v1.20.2 github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20230510185313-f5e39e5f34c7 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bufbuild/protocompile v0.6.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 // indirect github.com/containerd/stargz-snapshotter/estargz 
v0.15.1 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/dave/jennifer v1.6.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect github.com/docker/cli v24.0.7+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker-credential-helpers v0.8.1 + github.com/docker/docker-credential-helpers v0.8.2 github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/evanphx/json-patch v5.6.0+incompatible // indirect - github.com/evanphx/json-patch/v5 v5.8.0 // indirect + github.com/evanphx/json-patch v5.9.0+incompatible // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/fatih/color v1.16.0 // indirect - github.com/felixge/fgprof v0.9.3 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/go-chi/chi/v5 v5.0.11 // indirect - github.com/go-logr/logr v1.4.1 + github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.22.3 // indirect github.com/gobuffalo/flect v1.0.2 // indirect - github.com/gofrs/uuid/v5 v5.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20230516205744-dbecb1de8cfa // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20240117000934-35fc243c5815 // indirect - github.com/google/uuid v1.6.0 + github.com/google/uuid v1.6.0 // indirect github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect 
github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.17.4 // indirect - github.com/klauspost/pgzip v1.2.6 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect @@ -173,49 +165,41 @@ require ( github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/morikuni/aec v1.0.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0-rc5 // indirect - github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect - github.com/pkg/profile v1.7.0 // indirect - github.com/prometheus/client_golang v1.18.0 + github.com/prometheus/client_golang v1.19.1 github.com/prometheus/client_model v0.5.0 // indirect - github.com/prometheus/common v0.45.0 // indirect + github.com/prometheus/common v0.48.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect - github.com/rs/cors v1.10.1 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/spf13/cobra v1.8.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/tetratelabs/wazero v1.6.0 // indirect github.com/vbatts/tar-split v0.11.5 // indirect github.com/vladimirvivien/gexe v0.2.0 // indirect - go.opentelemetry.io/otel v1.19.0 // indirect - go.opentelemetry.io/otel/metric v1.19.0 // indirect - go.opentelemetry.io/otel/sdk v1.19.0 // indirect - go.opentelemetry.io/otel/trace v1.19.0 // indirect - go.uber.org/atomic v1.11.0 // indirect + go.opentelemetry.io/otel v1.26.0 // indirect + go.opentelemetry.io/otel/metric v1.26.0 // indirect + go.opentelemetry.io/otel/trace v1.26.0 // indirect go.uber.org/multierr v1.11.0 // 
indirect - go.uber.org/zap v1.26.0 // indirect - golang.org/x/crypto v0.18.0 // indirect - golang.org/x/mod v0.14.0 // indirect - golang.org/x/net v0.20.0 // indirect // indirect - golang.org/x/oauth2 v0.15.0 // indirect - golang.org/x/sys v0.16.0 // indirect - golang.org/x/term v0.16.0 + go.uber.org/zap v1.27.0 // indirect + golang.org/x/crypto v0.22.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.24.0 + golang.org/x/oauth2 v0.17.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/term v0.19.0 golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.17.0 // indirect + golang.org/x/tools v0.20.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/component-base v0.29.1 // indirect - k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 // indirect - k8s.io/klog/v2 v2.110.1 - k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + k8s.io/component-base v0.30.1 // indirect + k8s.io/klog/v2 v2.120.1 + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum b/go.sum index f3bdb7158..e8b05366b 100644 --- a/go.sum +++ b/go.sum @@ -1,12 +1,8 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= -cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= +cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg= 
+cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -connectrpc.com/connect v1.13.0 h1:lGs5maZZzWOOD+PFFiOt5OncKmMsk9ZdPwpy5jcmaYg= -connectrpc.com/connect v1.13.0/go.mod h1:uHAFHtYgeSZJxXrkN1IunDpKghnTXhYbVh0wW4StPW0= -connectrpc.com/otelconnect v0.6.0 h1:VJAdQL9+sgdUw9+7+J+jq8pQo/h1S7tSFv2+vDcR7bU= -connectrpc.com/otelconnect v0.6.0/go.mod h1:jdcs0uiwXQVmSMgTJ2dAaWR5VbpNd7QKNkuoH7n86RA= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= @@ -47,14 +43,14 @@ github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF0 github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= -github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= -github.com/alecthomas/assert/v2 v2.1.0 h1:tbredtNcQnoSd3QBhQWI7QZ3XHOVkw1Moklp2ojoH/0= -github.com/alecthomas/assert/v2 v2.1.0/go.mod h1:b/+1DI2Q6NckYi+3mXyH3wFb8qG37K/DuK80n7WefXA= -github.com/alecthomas/kong v0.8.1 h1:acZdn3m4lLRobeh3Zi2S2EpnXTd1mOL6U7xVml+vfkY= -github.com/alecthomas/kong v0.8.1/go.mod h1:n1iCIO2xS46oE8ZfYCNDqdR0b0wZNrXAIAqro/2132U= -github.com/alecthomas/repr v0.1.0 h1:ENn2e1+J3k09gyj2shc0dHr/yjaWSHRlrJ4DPMevDqE= -github.com/alecthomas/repr v0.1.0/go.mod 
h1:2kn6fqh/zIyPLmm3ugklbEi5hg5wS435eygvNfaDQL8= +github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78= +github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= +github.com/alecthomas/assert/v2 v2.6.0 h1:o3WJwILtexrEUk3cUVal3oiQY2tfgr/FHWiz/v2n4FU= +github.com/alecthomas/assert/v2 v2.6.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= +github.com/alecthomas/kong v0.9.0 h1:G5diXxc85KvoV2f0ZRVuMsi45IrBgx9zDNGNj165aPA= +github.com/alecthomas/kong v0.9.0/go.mod h1:Y47y5gKfHp1hDc7CH7OeXgLIpp+Q2m1Ni0L5s3bI8Os= +github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc= +github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= @@ -91,21 +87,17 @@ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.10/go.mod h1:AFvkxc8xfBe8XA+5 github.com/aws/aws-sdk-go-v2/service/sts v1.19.0 h1:2DQLAKDteoEDI8zpCzqBMaZlJuoE9iTYD0gFmXVax9E= github.com/aws/aws-sdk-go-v2/service/sts v1.19.0/go.mod h1:BgQOMsg8av8jset59jelyPW7NoZcZXLVpDsXunGDrk8= github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= -github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= -github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= +github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q= +github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20230510185313-f5e39e5f34c7 h1:G5IT+PEpFY0CDb3oITDP9tkmLrHkVD8Ny+elUmBqVYI= 
github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20230510185313-f5e39e5f34c7/go.mod h1:VVALgT1UESBh91dY0GprHnT1Z7mKd96VDk8qVy+bmu0= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/bufbuild/buf v1.27.2 h1:uX2kvZfPfRoOsrxUW4LwpykSyH+wI5dUnIG0QWHDCCU= -github.com/bufbuild/buf v1.27.2/go.mod h1:7RImDhFDqhEsdK5wbuMhoVSlnrMggGGcd3s9WozvHtM= -github.com/bufbuild/protocompile v0.6.0 h1:Uu7WiSQ6Yj9DbkdnOe7U4mNKp58y9WDMKDn28/ZlunY= -github.com/bufbuild/protocompile v0.6.0/go.mod h1:YNP35qEYoYGme7QMtz5SBCoN4kL4g12jTtjuzRNdjpE= github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= -github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= -github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -129,13 +121,12 @@ github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03V github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/cpuguy83/go-md2man/v2 
v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/crossplane/crossplane-runtime v1.16.0-rc.1 h1:wBvfaEgDdYa47qovPWYc80IGTBw17B3zw3yf2Q2NNkQ= -github.com/crossplane/crossplane-runtime v1.16.0-rc.1/go.mod h1:kRcJjJQmBFrR2n/KhwL8wYS7xNfq3D8eK4JliEScOHI= +github.com/crossplane/crossplane-runtime v1.18.0-rc.0 h1:1QoWF8LSsaJ7ff+vt7NhjrBtSpIHxISSsXCtkndu7/A= +github.com/crossplane/crossplane-runtime v1.18.0-rc.0/go.mod h1:vtglCrnnbq2HurAk9yLHa4qS0bbnCxaKL7C21cQcB/0= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0= @@ -159,44 +150,40 @@ github.com/docker/cli v24.0.7+incompatible h1:wa/nIwYFW7BVTGa7SWPVyyXU9lgORqUb1x github.com/docker/cli v24.0.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v25.0.2+incompatible h1:/OaKeauroa10K4Nqavw4zlhcDq/WBcPMc5DbjOGgozY= -github.com/docker/docker v25.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v25.0.6+incompatible h1:5cPwbwriIcsua2REJe8HqQV+6WlWc1byg2QSXzBxBGg= +github.com/docker/docker v25.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.7.0/go.mod 
h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= -github.com/docker/docker-credential-helpers v0.8.1 h1:j/eKUktUltBtMzKqmfLB0PAgqYyMHOp5vfsD1807oKo= -github.com/docker/docker-credential-helpers v0.8.1/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= +github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= +github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU= github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= -github.com/emicklei/dot v1.6.1 h1:ujpDlBkkwgWUY+qPId5IwapRW/xEoligRSYjioR6DFI= -github.com/emicklei/dot v1.6.1/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= +github.com/emicklei/dot v1.6.2 h1:08GN+DD79cy/tzN6uLCT84+2Wk9u+wvqP+Hkx/dIR8A= +github.com/emicklei/dot v1.6.2/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= 
-github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= -github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= +github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= -github.com/evanphx/json-patch/v5 v5.8.0 h1:lRj6N9Nci7MvzrXuX6HFzU8XjmhPiXPlsKEy1u0KQro= -github.com/evanphx/json-patch/v5 v5.8.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= -github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g= -github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY= -github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4= -github.com/go-chi/chi/v5 
v5.0.11 h1:BnpYbFZ3T3S1WMpD79r7R5ThWX40TaFB7L31Y8xqSwA= -github.com/go-chi/chi/v5 v5.0.11/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= +github.com/gliderlabs/ssh v0.3.7 h1:iV3Bqi942d9huXnzEF2Mt+CY9gLu8DNM4Obd+8bODRE= +github.com/gliderlabs/ssh v0.3.7/go.mod h1:zpHEXBstFnQYtGnB8k8kQLol82umzn/2/snG7alWVD8= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= @@ -205,13 +192,11 @@ github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+ github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= -github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4= -github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-git/go-git/v5 v5.12.0 h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZtys= +github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXYjuz9i5OEY= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= 
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= @@ -226,18 +211,14 @@ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEe github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gobuffalo/flect v1.0.2 h1:eqjPGSo2WmjgY2XlpGwo2NXgL3RucAKo4k4qQMNA5sA= github.com/gobuffalo/flect v1.0.2/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= -github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= -github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/gofrs/uuid/v5 v5.0.0 h1:p544++a97kEL+svbcFbCQVM9KFu0Yo25UoISXGNNH9M= -github.com/gofrs/uuid/v5 v5.0.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw= -github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache 
v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -252,12 +233,12 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/cel-go v0.17.7 h1:6ebJFzu1xO2n7TLtN+UBqShGBhlD85bhvglh5DpcfqQ= -github.com/google/cel-go v0.17.7/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= +github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto= +github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -271,19 +252,17 @@ github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp 
v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-containerregistry v0.19.0 h1:uIsMRBV7m/HDkDxE/nXMnv1q+lOOSPlQ/ywc5JbB8Ic= -github.com/google/go-containerregistry v0.19.0/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ= +github.com/google/go-containerregistry v0.19.1 h1:yMQ62Al6/V0Z7CqIrrS1iYoA5/oQCm88DeNujc7C1KY= +github.com/google/go-containerregistry v0.19.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI= github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20230919002926-dbcd01c402b2 h1:ChuUQ1y5Vf+Eev+UgEed/ljibTIcWY7mYPtWYLK7fxU= github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20230919002926-dbcd01c402b2/go.mod h1:Ek+8PQrShkA7aHEj3/zSW33wU0V/Bx3zW/gFh7l21xY= github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20230516205744-dbecb1de8cfa h1:+MG+Q2Q7mtW6kCIbUPZ9ZMrj7xOWDKI1hhy1qp0ygI0= github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20230516205744-dbecb1de8cfa/go.mod h1:KdL98/Va8Dy1irB6lTxIRIQ7bQj4lbrlvqUzKEQ+ZBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= -github.com/google/pprof v0.0.0-20240117000934-35fc243c5815 h1:WzfWbQz/Ze8v6l++GGbGNFZnUShVpP/0xffCPLL+ax8= -github.com/google/pprof v0.0.0-20240117000934-35fc243c5815/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20240422182052-72c8669ad3e7 h1:3q13T5NW3mlTJZM6B5UAsf2N5NYFbYWIyI3W8DlvBDU= +github.com/google/pprof v0.0.0-20240422182052-72c8669ad3e7/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= github.com/google/safetext 
v0.0.0-20220905092116-b49f7bc46da2/go.mod h1:Tv1PlzqC9t8wNnpPdctvtSUOPUUg4SHeE6vR1Ir2hmg= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= @@ -297,11 +276,10 @@ github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:Fecb github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= -github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= @@ -309,11 +287,7 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 
h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= -github.com/jdx/go-netrc v1.0.0 h1:QbLMLyCZGj0NA8glAhxUpf1zDg6cxnWgMBbjq40W0gQ= -github.com/jdx/go-netrc v1.0.0/go.mod h1:Gh9eFQJnoTNIRHXl2j5bJXA1u84hQWJWgGh569zF3v8= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jhump/protoreflect v1.15.3 h1:6SFRuqU45u9hIZPJAoZ8c28T3nK64BNdp9w6jFonzls= -github.com/jhump/protoreflect v1.15.3/go.mod h1:4ORHmSBmlCW8fh3xHmJMGyul1zNqZK4Elxc8qKP+p1k= github.com/jmattheis/goverter v1.3.2 h1:KBuJkqYtZAMsK6QG11+3RdxXZJWwULl+r0M6RWlXU4s= github.com/jmattheis/goverter v1.3.2/go.mod h1:Il/E+0riIfIgRBUpM+Fnh2s8/sJhMp5NeDZZenTd6S4= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -330,10 +304,7 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= -github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= -github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -351,8 +322,6 @@ github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27k github.com/mattn/go-isatty v0.0.16/go.mod 
h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= @@ -379,10 +348,10 @@ github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.14.0 h1:vSmGj2Z5YPb9JwCWT6z6ihcUvDhuXLc3sJiqd3jMKAY= -github.com/onsi/ginkgo/v2 v2.14.0/go.mod h1:JkUdW7JkN0V6rFvsHcJ478egV3XH9NxpD27Hal/PhZw= -github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= -github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8= +github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= +github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk= +github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0-rc5 
h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= @@ -392,38 +361,32 @@ github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+v github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= -github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= -github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA= -github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= -github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= -github.com/prometheus/common v0.45.0 
h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= -github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= +github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= +github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= -github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo= -github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= -github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= +github.com/skeema/knownhosts v1.2.2 
h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A= +github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= @@ -445,10 +408,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/tetratelabs/wazero v1.6.0 h1:z0H1iikCdP8t+q341xqepY4EWvHEw8Es7tlqiVzlP3g= -github.com/tetratelabs/wazero v1.6.0/go.mod h1:0U0G41+ochRKoPKCJlh0jMg1CHkyfK8kDqiirMmKY8A= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/upbound/up-sdk-go v0.1.1-0.20240122203953-2d00664aab8e h1:aNzUuv4ZKH2OT3Qv6dpZxkMPDOfl/6MoS79T/zUzako= github.com/upbound/up-sdk-go v0.1.1-0.20240122203953-2d00664aab8e/go.mod h1:IDIbYDb9fbedtxCc2CrdGcVRol6la7z2gkKh0VYWVGk= github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= @@ -471,36 +432,32 @@ go.etcd.io/etcd/client/v3 v3.5.10 h1:W9TXNZ+oB3MCd/8UjxHTWK5J9Nquw9fQBLJd5ne5/Ao go.etcd.io/etcd/client/v3 v3.5.10/go.mod h1:RVeBnDz2PUEZqTpgqwAtUd8nAPf5kjyFyND7P1VkOKc= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 h1:ZOLJc06r4CB42laIXg/7udr0pbZyuAihN10A/XuiQRY= 
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0/go.mod h1:5z+/ZWJQKXa9YT34fQNx5K8Hd1EoIhvtUygUQPqEOgQ= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q= -go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= -go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 h1:sv9kVfal0MK0wBMCOGr+HeJm9v803BkJxGrk2au7j08= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw= +go.opentelemetry.io/otel v1.26.0 h1:LQwgL5s/1W7YiiRwxf03QGnWLb2HW4pLiAhaA5cZXBs= +go.opentelemetry.io/otel v1.26.0/go.mod h1:UmLkJHUAidDval2EICqBMbnAd0/m2vmpf/dAM+fvFs4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0 h1:1u/AyyOqAWzy+SkPxDpahCNZParHV8Vid1RnI2clyDE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0/go.mod h1:z46paqbJ9l7c9fIPCXTqTGwhQZ5XoTIsfeFYWboizjs= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= -go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= 
-go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= -go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= -go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= -go.opentelemetry.io/otel/sdk/metric v1.19.0 h1:EJoTO5qysMsYCa+w4UghwFV/ptQgqSL/8Ni+hx+8i1k= -go.opentelemetry.io/otel/sdk/metric v1.19.0/go.mod h1:XjG0jQyFJrv2PbMvwND7LwCEhsJzCzV5210euduKcKY= -go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= -go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= -go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= -go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0 h1:1wp/gyxsuYtuE/JFxsQRtcCDtMrO2qMvlfXALU5wkzI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0/go.mod h1:gbTHmghkGgqxMomVQQMur1Nba4M0MQ8AYThXDUjsJ38= +go.opentelemetry.io/otel/metric v1.26.0 h1:7S39CLuY5Jgg9CrnA9HHiEjGMF/X2VHvoXGgSllRz30= +go.opentelemetry.io/otel/metric v1.26.0/go.mod h1:SY+rHOI4cEawI9a7N1A4nIg/nTQXe1ccCNWYOJUrpX4= +go.opentelemetry.io/otel/sdk v1.26.0 h1:Y7bumHf5tAiDlRYFmGqetNcLaVUZmh4iYfmGxtmz7F8= +go.opentelemetry.io/otel/sdk v1.26.0/go.mod h1:0p8MXpqLeJ0pzcszQQN4F0S5FVjBLgypeGSngLsmirs= +go.opentelemetry.io/otel/trace v1.26.0 h1:1ieeAUb4y0TE26jUFrCIXKpTuVK7uJGN9/Z/2LP5sQA= +go.opentelemetry.io/otel/trace v1.26.0/go.mod h1:4iDxvGDQuUkHve82hJJ8UqrwswHYsZuWCBllGV2U2y0= +go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= +go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod 
h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= -go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= -go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -511,11 +468,11 @@ golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp 
v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o= -golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= +golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f h1:99ci1mjWVBWwJiEKYY6jWa4d2nTQVIEhZIptnrVb1XY= +golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f/go.mod h1:/lliqkxwWAhPjf5oSOIJup2XcqJaw8RGS6k3TGEc7GI= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -525,8 +482,8 @@ golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -543,11 +500,11 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= 
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= -golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= +golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= +golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -556,8 +513,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -570,21 +527,19 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -592,8 +547,8 @@ golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= -golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -613,15 +568,14 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools 
v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= -golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY= +golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -635,17 +589,17 @@ google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ= -google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY= -google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo= 
-google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= +google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0= +google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be h1:LG9vZxsWGOmUKieR8wPAUR3u3MpnYFQZROPIMaXh7/A= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.61.0 h1:TOvOcuXn30kRao+gfcvsebNEa5iZIiLkisYEkf7R7o0= -google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= +google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= +google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 h1:rNBFJjBCOgVr9pWD7rs/knKL4FRTKgpZmsRfV214zcA= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0/go.mod h1:Dk1tviKTvMCz5tvh7t+fh94dhmQVHuCt2OzJB3CTW9Y= google.golang.org/protobuf 
v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -658,8 +612,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -672,7 +626,6 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= @@ -683,43 +636,42 @@ gotest.tools/v3 v3.1.0 h1:rVV8Tcg/8jHUkPUorwjaMTtemIMVXfIPKiOqnhEhakk= gotest.tools/v3 v3.1.0/go.mod h1:fHy7eyTmJFO5bQbUsEGQ1v4m2J3Jz9eWL54TP2/ZuYQ= honnef.co/go/tools 
v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.29.1 h1:DAjwWX/9YT7NQD4INu49ROJuZAAAP/Ijki48GUPzxqw= -k8s.io/api v0.29.1/go.mod h1:7Kl10vBRUXhnQQI8YR/R327zXC8eJ7887/+Ybta+RoQ= -k8s.io/apiextensions-apiserver v0.29.1 h1:S9xOtyk9M3Sk1tIpQMu9wXHm5O2MX6Y1kIpPMimZBZw= -k8s.io/apiextensions-apiserver v0.29.1/go.mod h1:zZECpujY5yTW58co8V2EQR4BD6A9pktVgHhvc0uLfeU= -k8s.io/apimachinery v0.29.1 h1:KY4/E6km/wLBguvCZv8cKTeOwwOBqFNjwJIdMkMbbRc= -k8s.io/apimachinery v0.29.1/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= -k8s.io/apiserver v0.29.1 h1:e2wwHUfEmMsa8+cuft8MT56+16EONIEK8A/gpBSco+g= -k8s.io/apiserver v0.29.1/go.mod h1:V0EpkTRrJymyVT3M49we8uh2RvXf7fWC5XLB0P3SwRw= +k8s.io/api v0.30.1 h1:kCm/6mADMdbAxmIh0LBjS54nQBE+U4KmbCfIkF5CpJY= +k8s.io/api v0.30.1/go.mod h1:ddbN2C0+0DIiPntan/bye3SW3PdwLa11/0yqwvuRrJM= +k8s.io/apiextensions-apiserver v0.30.0 h1:jcZFKMqnICJfRxTgnC4E+Hpcq8UEhT8B2lhBcQ+6uAs= +k8s.io/apiextensions-apiserver v0.30.0/go.mod h1:N9ogQFGcrbWqAY9p2mUAL5mGxsLqwgtUce127VtRX5Y= +k8s.io/apimachinery v0.30.1 h1:ZQStsEfo4n65yAdlGTfP/uSHMQSoYzU/oeEbkmF7P2U= +k8s.io/apimachinery v0.30.1/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/apiserver v0.30.0 h1:QCec+U72tMQ+9tR6A0sMBB5Vh6ImCEkoKkTDRABWq6M= +k8s.io/apiserver v0.30.0/go.mod h1:smOIBq8t0MbKZi7O7SyIpjPsiKJ8qa+llcFCluKyqiY= k8s.io/cli-runtime v0.29.1 h1:By3WVOlEWYfyxhGko0f/IuAOLQcbBSMzwSaDren2JUs= k8s.io/cli-runtime v0.29.1/go.mod h1:vjEY9slFp8j8UoMhV5AlO8uulX9xk6ogfIesHobyBDU= -k8s.io/client-go v0.29.1 h1:19B/+2NGEwnFLzt0uB5kNJnfTsbV8w6TgQRz9l7ti7A= -k8s.io/client-go v0.29.1/go.mod h1:TDG/psL9hdet0TI9mGyHJSgRkW3H9JZk2dNEUS7bRks= -k8s.io/code-generator v0.29.1 h1:8ba8BdtSmAVHgAMpzThb/fuyQeTRtN7NtN7VjMcDLew= -k8s.io/code-generator v0.29.1/go.mod h1:FwFi3C9jCrmbPjekhaCYcYG1n07CYiW1+PAPCockaos= -k8s.io/component-base v0.29.1 
h1:MUimqJPCRnnHsskTTjKD+IC1EHBbRCVyi37IoFBrkYw= -k8s.io/component-base v0.29.1/go.mod h1:fP9GFjxYrLERq1GcWWZAE3bqbNcDKDytn2srWuHTtKc= -k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 h1:pWEwq4Asjm4vjW7vcsmijwBhOr1/shsbSYiWXmNGlks= -k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= -k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/client-go v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q= +k8s.io/client-go v0.30.1/go.mod h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc= +k8s.io/code-generator v0.30.0 h1:3VUVqHvWFSVSm9kqL/G6kD4ZwNdHF6J/jPyo3Jgjy3k= +k8s.io/code-generator v0.30.0/go.mod h1:mBMZhfRR4IunJUh2+7LVmdcWwpouCH5+LNPkZ3t/v7Q= +k8s.io/component-base v0.30.1 h1:bvAtlPh1UrdaZL20D9+sWxsJljMi0QZ3Lmw+kmZAaxQ= +k8s.io/component-base v0.30.1/go.mod h1:e/X9kDiOebwlI41AvBHuWdqFriSRrX50CdwA9TFaHLI= +k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo= +k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= k8s.io/kubectl v0.29.1 h1:rWnW3hi/rEUvvg7jp4iYB68qW5un/urKbv7fu3Vj0/s= k8s.io/kubectl v0.29.1/go.mod h1:SZzvLqtuOJYSvZzPZR9weSuP0wDQ+N37CENJf0FhDF4= 
k8s.io/metrics v0.29.1 h1:qutc3aIPMCniMuEApuLaeYX47rdCn8eycVDx7R6wMlQ= k8s.io/metrics v0.29.1/go.mod h1:JrbV2U71+v7d/9qb90UVKL8r0uJ6Z2Hy4V7mDm05cKs= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= -sigs.k8s.io/controller-runtime v0.17.0 h1:fjJQf8Ukya+VjogLO6/bNX9HE6Y2xpsO5+fyS26ur/s= -sigs.k8s.io/controller-runtime v0.17.0/go.mod h1:+MngTvIQQQhfXtwfdGw/UOQ/aIaqsYywfCINOtwMO/s= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 h1:/U5vjBbQn3RChhv7P11uhYvCSm5G2GaIi5AIGBS6r4c= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0/go.mod h1:z7+wmGM2dfIiLRfrC6jb5kV2Mq/sK1ZP303cxzkV5Y4= +sigs.k8s.io/controller-runtime v0.18.2 h1:RqVW6Kpeaji67CY5nPEfRz6ZfFMk0lWQlNrLqlNpx+Q= +sigs.k8s.io/controller-runtime v0.18.2/go.mod h1:tuAt1+wbVsXIT8lPtk5RURxqAnq7xkpv2Mhttslg7Hw= sigs.k8s.io/controller-tools v0.14.0 h1:rnNoCC5wSXlrNoBKKzL70LNJKIQKEzT6lloG6/LF73A= sigs.k8s.io/controller-tools v0.14.0/go.mod h1:TV7uOtNNnnR72SpzhStvPkoS/U5ir0nMudrkrC4M9Sc= -sigs.k8s.io/e2e-framework v0.3.0 h1:eqQALBtPCth8+ulTs6lcPK7ytV5rZSSHJzQHZph4O7U= -sigs.k8s.io/e2e-framework v0.3.0/go.mod h1:C+ef37/D90Dc7Xq1jQnNbJYscrUGpxrWog9bx2KIa+c= +sigs.k8s.io/e2e-framework v0.4.0 h1:4yYmFDNNoTnazqmZJXQ6dlQF1vrnDbutmxlyvBpC5rY= +sigs.k8s.io/e2e-framework v0.4.0/go.mod h1:JilFQPF1OL1728ABhMlf9huse7h+uBJDXl9YeTs49A8= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kind v0.20.0 h1:f0sc3v9mQbGnjBUaqSFST1dwIuiikKVGgoTwpoP33a8= @@ -730,7 +682,6 @@ 
sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 h1:W6cLQc5pnqM sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3/go.mod h1:JWP1Fj0VWGHyw3YUPjXSQnRnrwezrZSrApfX5S0nIag= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/hack/duplicate_proto_type.sh b/hack/duplicate_proto_type.sh new file mode 100755 index 000000000..013e1a26a --- /dev/null +++ b/hack/duplicate_proto_type.sh @@ -0,0 +1,30 @@ +#!/bin/sh + +# Usage example: +# +# ./duplicate_proto_type.sh apiextensions/fn/proto/v1/run_function.proto apiextensions/fn/proto/v1beta1 +# +# The above command will create zz_generated.run_function.proto in the v1beta1 +# directory. The most specific segment of the package name is assumed to be the +# same as the target directory (i.e. v1beta1). + +set -e + +FROM_PATH=${1} +TO_DIR=${2} + +DO_NOT_EDIT="// Generated from ${FROM_PATH} by ${0}. DO NOT EDIT." + +FROM_DIR=$(dirname ${FROM_PATH}) +FROM_FILE=$(basename ${FROM_PATH}) +FROM_PACKAGE=$(basename ${FROM_DIR}) + +TO_PACKAGE=$(basename ${TO_DIR}) +TO_PATH="${TO_DIR}/zz_generated_${FROM_FILE}" + +sed -r \ + -e "s#^package (.+)\.${FROM_PACKAGE};\$#${DO_NOT_EDIT}\n\npackage \1.${TO_PACKAGE};#" \ + -e "s#^option go_package = \"(.+)/${FROM_PACKAGE}\";\$#option go_package = \"\1/${TO_PACKAGE}\";#" \ + ${FROM_PATH} > ${TO_PATH} + +echo "Duplicated ${FROM_PATH} (package ${FROM_PACKAGE}) to ${TO_PATH} (package ${TO_PACKAGE})." 
diff --git a/hack/linter-violation.tmpl b/hack/linter-violation.tmpl index 1a91dfa06..d1f43403f 100644 --- a/hack/linter-violation.tmpl +++ b/hack/linter-violation.tmpl @@ -1,3 +1,3 @@ `{{violation.rule}}`: {{violation.message}} -Refer to Crossplane's [coding style documentation](https://github.com/crossplane/crossplane/blob/master/CONTRIBUTING.md#coding-style-and-linting) for more information. \ No newline at end of file +Refer to Crossplane's [coding style documentation](https://github.com/crossplane/crossplane/blob/main/CONTRIBUTING.md#coding-style-and-linting) for more information. \ No newline at end of file diff --git a/internal/controller/apiextensions/claim/connection.go b/internal/controller/apiextensions/claim/connection.go index 0ec175c94..2e48506bf 100644 --- a/internal/controller/apiextensions/claim/connection.go +++ b/internal/controller/apiextensions/claim/connection.go @@ -43,7 +43,7 @@ const ( // NopConnectionUnpublisher is a ConnectionUnpublisher that does nothing. type NopConnectionUnpublisher struct{} -// NewNopConnectionUnpublisher returns a new NopConnectionUnpublisher +// NewNopConnectionUnpublisher returns a new NopConnectionUnpublisher. func NewNopConnectionUnpublisher() *NopConnectionUnpublisher { return &NopConnectionUnpublisher{} } @@ -146,6 +146,8 @@ func (a *APIConnectionPropagator) PropagateConnection(ctx context.Context, to re resource.AllowUpdateIf(func(current, desired runtime.Object) bool { // We consider the update to be a no-op and don't allow it if the // current and existing secret data are identical. + + //nolint:forcetypeassert // These will always be secrets. 
return !cmp.Equal(current.(*corev1.Secret).Data, desired.(*corev1.Secret).Data, cmpopts.EquateEmpty()) }), ) diff --git a/internal/controller/apiextensions/claim/connection_test.go b/internal/controller/apiextensions/claim/connection_test.go index 72b46c578..e4fc314f6 100644 --- a/internal/controller/apiextensions/claim/connection_test.go +++ b/internal/controller/apiextensions/claim/connection_test.go @@ -34,9 +34,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/test" ) -var ( - _ ConnectionPropagator = &APIConnectionPropagator{} -) +var _ ConnectionPropagator = &APIConnectionPropagator{} func TestPropagateConnection(t *testing.T) { errBoom := errors.New("boom") diff --git a/internal/controller/apiextensions/claim/fuzz_test.go b/internal/controller/apiextensions/claim/fuzz_test.go index 8a62cc088..411094bfb 100644 --- a/internal/controller/apiextensions/claim/fuzz_test.go +++ b/internal/controller/apiextensions/claim/fuzz_test.go @@ -31,7 +31,7 @@ import ( ) func FuzzPropagateConnection(f *testing.F) { - f.Fuzz(func(t *testing.T, data []byte) { + f.Fuzz(func(_ *testing.T, data []byte) { f := fuzz.NewConsumer(data) cp := &fake.Composite{} cm := &fake.CompositeClaim{} @@ -62,7 +62,7 @@ func FuzzPropagateConnection(f *testing.F) { return nil }), }, - Applicator: resource.ApplyFn(func(_ context.Context, o client.Object, _ ...resource.ApplyOption) error { + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { return nil }), } diff --git a/internal/controller/apiextensions/claim/object.go b/internal/controller/apiextensions/claim/object.go index 3051f8e68..0c4596883 100644 --- a/internal/controller/apiextensions/claim/object.go +++ b/internal/controller/apiextensions/claim/object.go @@ -61,21 +61,21 @@ type mergeConfig struct { srcfilter []string } -// withMergeOptions allows custom mergo.Config options +// withMergeOptions allows custom mergo.Config options. 
func withMergeOptions(opts ...func(*mergo.Config)) func(*mergeConfig) { return func(config *mergeConfig) { config.mergeOptions = opts } } -// withSrcFilter filters supplied keys from src map before merging +// withSrcFilter filters supplied keys from src map before merging. func withSrcFilter(keys ...string) func(*mergeConfig) { return func(config *mergeConfig) { config.srcfilter = keys } } -// merge a src map into dst map +// merge a src map into dst map. func merge(dst, src any, opts ...func(*mergeConfig)) error { if dst == nil || src == nil { // Nothing available to merge if dst or src are nil. diff --git a/internal/controller/apiextensions/claim/reconciler.go b/internal/controller/apiextensions/claim/reconciler.go index f1ffea553..d94c9883a 100644 --- a/internal/controller/apiextensions/claim/reconciler.go +++ b/internal/controller/apiextensions/claim/reconciler.go @@ -27,7 +27,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" @@ -37,7 +36,6 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/meta" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" - "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured" "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/claim" "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" @@ -86,7 +84,7 @@ func ControllerName(name string) string { // managed using client-side apply, but should now be managed using server-side // apply. See https://github.com/kubernetes/kubernetes/issues/99003 for details. 
type ManagedFieldsUpgrader interface { - Upgrade(ctx context.Context, obj client.Object, ssaManager string, csaManagers ...string) error + Upgrade(ctx context.Context, obj client.Object, ssaManager string) error } // A CompositeSyncer binds and syncs the supplied claim with the supplied @@ -162,10 +160,10 @@ type DefaultsSelector interface { SelectDefaults(ctx context.Context, cm resource.CompositeClaim) error } -// A DefaultsSelectorFn is responsible for copying default values from the CompositeResourceDefinition +// A DefaultsSelectorFn is responsible for copying default values from the CompositeResourceDefinition. type DefaultsSelectorFn func(ctx context.Context, cm resource.CompositeClaim) error -// SelectDefaults copies default values from the XRD if necessary +// SelectDefaults copies default values from the XRD if necessary. func (fn DefaultsSelectorFn) SelectDefaults(ctx context.Context, cm resource.CompositeClaim) error { return fn(ctx, cm) } @@ -221,14 +219,6 @@ func defaultCRClaim(c client.Client) crClaim { // A ReconcilerOption configures a Reconciler. type ReconcilerOption func(*Reconciler) -// WithClient specifies how the Reconciler should interact with the Kubernetes -// API. -func WithClient(c client.Client) ReconcilerOption { - return func(r *Reconciler) { - r.client = c - } -} - // WithManagedFieldsUpgrader specifies how the Reconciler should upgrade claim // and composite resource (XR) managed fields from client-side apply to // server-side apply. @@ -300,8 +290,7 @@ func WithPollInterval(after time.Duration) ReconcilerOption { // The returned Reconciler will apply only the ObjectMetaConfigurator by // default; most callers should supply one or more CompositeConfigurators to // configure their composite resources. 
-func NewReconciler(m manager.Manager, of resource.CompositeClaimKind, with resource.CompositeKind, o ...ReconcilerOption) *Reconciler { - c := unstructured.NewClient(m.GetClient()) +func NewReconciler(c client.Client, of resource.CompositeClaimKind, with resource.CompositeKind, o ...ReconcilerOption) *Reconciler { r := &Reconciler{ client: c, gvkClaim: schema.GroupVersionKind(of), @@ -321,7 +310,7 @@ func NewReconciler(m manager.Manager, of resource.CompositeClaimKind, with resou } // Reconcile a composite resource claim with a concrete composite resource. -func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocyclo // Complexity is tough to avoid here. +func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocognit // Complexity is tough to avoid here. log := r.log.WithValues("request", req) log.Debug("Reconciling") @@ -390,7 +379,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco // to upgrade field managers if _this controller_ might have applied the XR // before using the default client-side apply field manager "crossplane", // but now wants to use server-side apply instead. 
- if err := r.managedFields.Upgrade(ctx, xr, FieldOwnerXR, "crossplane"); err != nil { + if err := r.managedFields.Upgrade(ctx, xr, FieldOwnerXR); err != nil { if kerrors.IsConflict(err) { return reconcile.Result{Requeue: true}, nil } @@ -411,7 +400,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco } if meta.WasDeleted(xr) && requiresForegroundDeletion { log.Debug("Waiting for the XR to finish deleting (foreground deletion)") - return reconcile.Result{Requeue: true}, nil + return reconcile.Result{Requeue: true}, errors.Wrap(r.client.Status().Update(ctx, cm), errUpdateClaimStatus) } do := &client.DeleteOptions{} if requiresForegroundDeletion { @@ -484,6 +473,12 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco cm.SetConditions(xpv1.ReconcileSuccess()) + // Copy any custom status conditions from the XR to the claim. + for _, cType := range xr.GetClaimConditionTypes() { + c := xr.GetCondition(cType) + cm.SetConditions(c) + } + if !resource.IsConditionTrue(xr.GetCondition(xpv1.TypeReady)) { record.Event(cm, event.Normal(reasonBind, "Composite resource is not yet ready")) diff --git a/internal/controller/apiextensions/claim/reconciler_test.go b/internal/controller/apiextensions/claim/reconciler_test.go index 6d0da73b5..919bae6de 100644 --- a/internal/controller/apiextensions/claim/reconciler_test.go +++ b/internal/controller/apiextensions/claim/reconciler_test.go @@ -28,7 +28,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" @@ -36,7 +35,6 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/meta" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" - 
"github.com/crossplane/crossplane-runtime/pkg/resource/fake" "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/claim" "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" "github.com/crossplane/crossplane-runtime/pkg/test" @@ -47,10 +45,10 @@ func TestReconcile(t *testing.T) { now := metav1.Now() type args struct { - mgr manager.Manager - of resource.CompositeClaimKind - with resource.CompositeKind - opts []ReconcilerOption + client client.Client + of resource.CompositeClaimKind + with resource.CompositeKind + opts []ReconcilerOption } type want struct { r reconcile.Result @@ -65,11 +63,8 @@ func TestReconcile(t *testing.T) { "ClaimNotFound": { reason: "We should not return an error if the composite resource was not found.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{}, "")), - }), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{}, "")), }, }, want: want{ @@ -79,11 +74,8 @@ func TestReconcile(t *testing.T) { "GetClaimError": { reason: "We should return any error we encounter getting the claim.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(errBoom), - }), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(errBoom), }, }, want: want{ @@ -94,19 +86,16 @@ func TestReconcile(t *testing.T) { "ReconciliationPaused": { reason: `If a claim has the pause annotation with value "true" we should stop reconciling and not requeue.`, args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - obj.(*claim.Unstructured).SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: "true"}) - return nil - }), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm 
*claim.Unstructured) { - // Check that we set our status condition. - cm.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: "true"}) - cm.SetConditions(xpv1.ReconcilePaused().WithMessage(reconcilePausedMsg)) - })), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + obj.(*claim.Unstructured).SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: "true"}) + return nil }), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. + cm.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: "true"}) + cm.SetConditions(xpv1.ReconcilePaused().WithMessage(reconcilePausedMsg)) + })), }, }, want: want{ @@ -116,26 +105,25 @@ func TestReconcile(t *testing.T) { "ReconciliationUnpaused": { reason: "If a claim has the ReconcilePaused status condition but no paused annotation, the condition should change to ReconcileSuccess.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - // This claim was paused. - obj.(*claim.Unstructured).SetConditions(xpv1.ReconcilePaused().WithMessage(reconcilePausedMsg)) - return nil - }), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that our synced status condition changed - // from Paused to ReconcileSuccess. - cm.SetConditions(xpv1.ReconcileSuccess()) - cm.SetConditions(Waiting()) - })), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + // This claim was paused. + obj.(*claim.Unstructured).SetConditions(xpv1.ReconcilePaused().WithMessage(reconcilePausedMsg)) + return nil }), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that our synced status condition changed + // from Paused to ReconcileSuccess. 
+ cm.SetConditions(xpv1.ReconcileSuccess()) + cm.SetConditions(Waiting()) + })), + }, + opts: []ReconcilerOption{ WithClaimFinalizer(resource.FinalizerFns{ - AddFinalizerFn: func(ctx context.Context, obj resource.Object) error { return nil }, + AddFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), - WithCompositeSyncer(CompositeSyncerFn(func(ctx context.Context, cm *claim.Unstructured, xr *composite.Unstructured) error { return nil })), - WithConnectionPropagator(ConnectionPropagatorFn(func(ctx context.Context, to resource.LocalConnectionSecretOwner, from resource.ConnectionSecretOwner) (propagated bool, err error) { + WithCompositeSyncer(CompositeSyncerFn(func(_ context.Context, _ *claim.Unstructured, _ *composite.Unstructured) error { return nil })), + WithConnectionPropagator(ConnectionPropagatorFn(func(_ context.Context, _ resource.LocalConnectionSecretOwner, _ resource.ConnectionSecretOwner) (propagated bool, err error) { return true, nil })), }, @@ -147,27 +135,24 @@ func TestReconcile(t *testing.T) { "GetCompositeError": { reason: "The reconcile should fail if we can't get the XR, unless it wasn't found.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - switch o := obj.(type) { - case *claim.Unstructured: - // We won't try to get an XR unless the claim - // references one. - o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - case *composite.Unstructured: - // Return an error getting the XR. - return errBoom - } - return nil - }), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that we set our status condition. 
- cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errGetComposite))) - })), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + switch o := obj.(type) { + case *claim.Unstructured: + // We won't try to get an XR unless the claim + // references one. + o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + case *composite.Unstructured: + // Return an error getting the XR. + return errBoom + } + return nil }), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. + cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errGetComposite))) + })), }, }, want: want{ @@ -177,29 +162,26 @@ func TestReconcile(t *testing.T) { "CompositeAlreadyBoundError": { reason: "The reconcile should fail if the referenced XR is bound to another claim", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - switch o := obj.(type) { - case *claim.Unstructured: - // We won't try to get an XR unless the claim - // references one. - o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - case *composite.Unstructured: - // This XR was created, and references another - // claim. - o.SetCreationTimestamp(now) - o.SetClaimReference(&claim.Reference{Name: "some-other-claim"}) - } - return nil - }), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that we set our status condition. 
- cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - cm.SetConditions(xpv1.ReconcileError(errors.Errorf(errFmtUnbound, "", "some-other-claim"))) - })), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + switch o := obj.(type) { + case *claim.Unstructured: + // We won't try to get an XR unless the claim + // references one. + o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + case *composite.Unstructured: + // This XR was created, and references another + // claim. + o.SetCreationTimestamp(now) + o.SetClaimReference(&claim.Reference{Name: "some-other-claim"}) + } + return nil }), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. + cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + cm.SetConditions(xpv1.ReconcileError(errors.Errorf(errFmtUnbound, "", "some-other-claim"))) + })), }, }, want: want{ @@ -209,33 +191,32 @@ func TestReconcile(t *testing.T) { "DeleteCompositeError": { reason: "We should not try to delete if the resource is already gone.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - switch o := obj.(type) { - case *claim.Unstructured: - o.SetDeletionTimestamp(&now) - // We won't try to get an XR unless the claim - // references one. - o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - case *composite.Unstructured: - // Pretend the XR exists. - o.SetCreationTimestamp(now) - } - return nil - }), - MockDelete: test.NewMockDeleteFn(errBoom), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that we set our status condition. 
- cm.SetDeletionTimestamp(&now) - cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - cm.SetConditions(xpv1.Deleting()) - cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errDeleteComposite))) - })), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + switch o := obj.(type) { + case *claim.Unstructured: + o.SetDeletionTimestamp(&now) + // We won't try to get an XR unless the claim + // references one. + o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + case *composite.Unstructured: + // Pretend the XR exists. + o.SetCreationTimestamp(now) + } + return nil }), + MockDelete: test.NewMockDeleteFn(errBoom), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. + cm.SetDeletionTimestamp(&now) + cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + cm.SetConditions(xpv1.Deleting()) + cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errDeleteComposite))) + })), + }, + opts: []ReconcilerOption{ WithClaimFinalizer(resource.FinalizerFns{ - RemoveFinalizerFn: func(ctx context.Context, obj resource.Object) error { return nil }, + RemoveFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), }, }, @@ -246,22 +227,21 @@ func TestReconcile(t *testing.T) { "UnpublishConnectionDetailsError": { reason: "The reconcile should fail if we can't unpublish the claim's connection details.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - obj.(*claim.Unstructured).SetDeletionTimestamp(&now) - return nil - }), - MockDelete: test.NewMockDeleteFn(nil), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that we set our status condition. 
- cm.SetDeletionTimestamp(&now) - cm.SetConditions(xpv1.Deleting()) - cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errDeleteCDs))) - })), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + obj.(*claim.Unstructured).SetDeletionTimestamp(&now) + return nil }), - WithConnectionUnpublisher(ConnectionUnpublisherFn(func(ctx context.Context, so resource.LocalConnectionSecretOwner, c managed.ConnectionDetails) error { + MockDelete: test.NewMockDeleteFn(nil), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. + cm.SetDeletionTimestamp(&now) + cm.SetConditions(xpv1.Deleting()) + cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errDeleteCDs))) + })), + }, + opts: []ReconcilerOption{ + WithConnectionUnpublisher(ConnectionUnpublisherFn(func(_ context.Context, _ resource.LocalConnectionSecretOwner, _ managed.ConnectionDetails) error { return errBoom })), }, @@ -273,23 +253,22 @@ func TestReconcile(t *testing.T) { "RemoveFinalizerError": { reason: "The reconcile should fail if we can't remove the claim's finalizer.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - obj.(*claim.Unstructured).SetDeletionTimestamp(&now) - return nil - }), - MockDelete: test.NewMockDeleteFn(nil), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that we set our status condition. 
- cm.SetDeletionTimestamp(&now) - cm.SetConditions(xpv1.Deleting()) - cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errRemoveFinalizer))) - })), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + obj.(*claim.Unstructured).SetDeletionTimestamp(&now) + return nil }), + MockDelete: test.NewMockDeleteFn(nil), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. + cm.SetDeletionTimestamp(&now) + cm.SetConditions(xpv1.Deleting()) + cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errRemoveFinalizer))) + })), + }, + opts: []ReconcilerOption{ WithClaimFinalizer(resource.FinalizerFns{ - RemoveFinalizerFn: func(ctx context.Context, obj resource.Object) error { return errBoom }, + RemoveFinalizerFn: func(_ context.Context, _ resource.Object) error { return errBoom }, }), }, }, @@ -300,23 +279,22 @@ func TestReconcile(t *testing.T) { "SuccessfulDelete": { reason: "We should not requeue if we successfully delete the resource.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - obj.(*claim.Unstructured).SetDeletionTimestamp(&now) - return nil - }), - MockDelete: test.NewMockDeleteFn(nil), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that we set our status condition. - cm.SetDeletionTimestamp(&now) - cm.SetConditions(xpv1.Deleting()) - cm.SetConditions(xpv1.ReconcileSuccess()) - })), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + obj.(*claim.Unstructured).SetDeletionTimestamp(&now) + return nil }), + MockDelete: test.NewMockDeleteFn(nil), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. 
+ cm.SetDeletionTimestamp(&now) + cm.SetConditions(xpv1.Deleting()) + cm.SetConditions(xpv1.ReconcileSuccess()) + })), + }, + opts: []ReconcilerOption{ WithClaimFinalizer(resource.FinalizerFns{ - RemoveFinalizerFn: func(ctx context.Context, obj resource.Object) error { return nil }, + RemoveFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), }, }, @@ -327,30 +305,29 @@ func TestReconcile(t *testing.T) { "SuccessfulForegroundDelete": { reason: "We should requeue if we successfully delete the bound composite resource using Foreground deletion", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - switch o := obj.(type) { - case *claim.Unstructured: - o.SetDeletionTimestamp(&now) - // We won't try to get an XR unless the claim - // references one. - o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - // We want to foreground delete. - fg := xpv1.CompositeDeleteForeground - o.SetCompositeDeletePolicy(&fg) - case *composite.Unstructured: - // Pretend the XR exists and is bound. - o.SetCreationTimestamp(now) - o.SetClaimReference(&claim.Reference{}) - } - return nil - }), - MockDelete: test.NewMockDeleteFn(nil), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + switch o := obj.(type) { + case *claim.Unstructured: + o.SetDeletionTimestamp(&now) + // We won't try to get an XR unless the claim + // references one. + o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + // We want to foreground delete. + fg := xpv1.CompositeDeleteForeground + o.SetCompositeDeletePolicy(&fg) + case *composite.Unstructured: + // Pretend the XR exists and is bound. 
+ o.SetCreationTimestamp(now) + o.SetClaimReference(&claim.Reference{}) + } + return nil }), + MockDelete: test.NewMockDeleteFn(nil), + }, + opts: []ReconcilerOption{ WithClaimFinalizer(resource.FinalizerFns{ - RemoveFinalizerFn: func(ctx context.Context, obj resource.Object) error { return nil }, + RemoveFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), }, }, @@ -361,31 +338,40 @@ func TestReconcile(t *testing.T) { "ForegroundDeleteWaitForCompositeDeletion": { reason: "We should requeue if we successfully deleted the bound composite resource using Foreground deletion and it has not yet been deleted", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - switch o := obj.(type) { - case *claim.Unstructured: - o.SetDeletionTimestamp(&now) - // We won't try to get an XR unless the claim - // references one. - o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - // We want to foreground delete. - fg := xpv1.CompositeDeleteForeground - o.SetCompositeDeletePolicy(&fg) - case *composite.Unstructured: - // Pretend the XR exists and is bound, but is - // being deleted. - o.SetCreationTimestamp(now) - o.SetDeletionTimestamp(&now) - o.SetClaimReference(&claim.Reference{}) - } - return nil - }), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + switch o := obj.(type) { + case *claim.Unstructured: + o.SetDeletionTimestamp(&now) + // We won't try to get an XR unless the claim + // references one. + o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + // We want to foreground delete. + fg := xpv1.CompositeDeleteForeground + o.SetCompositeDeletePolicy(&fg) + case *composite.Unstructured: + // Pretend the XR exists and is bound, but is + // being deleted. 
+ o.SetCreationTimestamp(now) + o.SetDeletionTimestamp(&now) + o.SetClaimReference(&claim.Reference{}) + } + return nil }), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + // We want to foreground delete. + fg := xpv1.CompositeDeleteForeground + cm.SetCompositeDeletePolicy(&fg) + + // Check that we set our status condition. + cm.SetDeletionTimestamp(&now) + cm.SetConditions(xpv1.Deleting()) + })), + }, + opts: []ReconcilerOption{ WithClaimFinalizer(resource.FinalizerFns{ - RemoveFinalizerFn: func(ctx context.Context, obj resource.Object) error { return nil }, + RemoveFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), }, }, @@ -396,17 +382,16 @@ func TestReconcile(t *testing.T) { "AddFinalizerError": { reason: "We should fail the reconcile if we can't add the claim's finalizer", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. + cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errAddFinalizer))) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that we set our status condition. 
- cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errAddFinalizer))) - })), - }), WithClaimFinalizer(resource.FinalizerFns{ - AddFinalizerFn: func(ctx context.Context, obj resource.Object) error { return errBoom }, + AddFinalizerFn: func(_ context.Context, _ resource.Object) error { return errBoom }, }), }, }, @@ -417,19 +402,18 @@ func TestReconcile(t *testing.T) { "SyncCompositeError": { reason: "We should fail the reconcile if we can't bind and sync the claim with a composite resource", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. + cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errSync))) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that we set our status condition. 
- cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errSync))) - })), - }), WithClaimFinalizer(resource.FinalizerFns{ - AddFinalizerFn: func(ctx context.Context, obj resource.Object) error { return nil }, + AddFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), - WithCompositeSyncer(CompositeSyncerFn(func(ctx context.Context, cm *claim.Unstructured, xr *composite.Unstructured) error { return errBoom })), + WithCompositeSyncer(CompositeSyncerFn(func(_ context.Context, _ *claim.Unstructured, _ *composite.Unstructured) error { return errBoom })), }, }, want: want{ @@ -439,35 +423,34 @@ func TestReconcile(t *testing.T) { "CompositeNotReady": { reason: "We should return early if the bound composite resource is not yet ready", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - switch o := obj.(type) { - case *claim.Unstructured: - // We won't try to get an XR unless the claim - // references one. - o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - case *composite.Unstructured: - // Pretend the XR exists and is bound, but is - // still being created. - o.SetCreationTimestamp(now) - o.SetClaimReference(&claim.Reference{}) - o.SetConditions(xpv1.Creating()) - } - return nil - }), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that we set our status condition. - cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - cm.SetConditions(xpv1.ReconcileSuccess()) - cm.SetConditions(Waiting()) - })), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + switch o := obj.(type) { + case *claim.Unstructured: + // We won't try to get an XR unless the claim + // references one. 
+ o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + case *composite.Unstructured: + // Pretend the XR exists and is bound, but is + // still being created. + o.SetCreationTimestamp(now) + o.SetClaimReference(&claim.Reference{}) + o.SetConditions(xpv1.Creating()) + } + return nil }), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. + cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + cm.SetConditions(xpv1.ReconcileSuccess()) + cm.SetConditions(Waiting()) + })), + }, + opts: []ReconcilerOption{ WithClaimFinalizer(resource.FinalizerFns{ - AddFinalizerFn: func(ctx context.Context, obj resource.Object) error { return nil }, + AddFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), - WithCompositeSyncer(CompositeSyncerFn(func(ctx context.Context, cm *claim.Unstructured, xr *composite.Unstructured) error { return nil })), + WithCompositeSyncer(CompositeSyncerFn(func(_ context.Context, _ *claim.Unstructured, _ *composite.Unstructured) error { return nil })), }, }, want: want{ @@ -477,34 +460,33 @@ func TestReconcile(t *testing.T) { "PropagateConnectionError": { reason: "We should fail the reconcile if we can't propagate the bound XR's connection details", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - switch o := obj.(type) { - case *claim.Unstructured: - // We won't try to get an XR unless the claim - // references one. - o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - case *composite.Unstructured: - // Pretend the XR exists and is available. - o.SetCreationTimestamp(now) - o.SetClaimReference(&claim.Reference{}) - o.SetConditions(xpv1.Available()) - } - return nil - }), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that we set our status condition. 
- cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errPropagateCDs))) - })), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + switch o := obj.(type) { + case *claim.Unstructured: + // We won't try to get an XR unless the claim + // references one. + o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + case *composite.Unstructured: + // Pretend the XR exists and is available. + o.SetCreationTimestamp(now) + o.SetClaimReference(&claim.Reference{}) + o.SetConditions(xpv1.Available()) + } + return nil }), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. + cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + cm.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errPropagateCDs))) + })), + }, + opts: []ReconcilerOption{ WithClaimFinalizer(resource.FinalizerFns{ - AddFinalizerFn: func(ctx context.Context, obj resource.Object) error { return nil }, + AddFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), - WithCompositeSyncer(CompositeSyncerFn(func(ctx context.Context, cm *claim.Unstructured, xr *composite.Unstructured) error { return nil })), - WithConnectionPropagator(ConnectionPropagatorFn(func(ctx context.Context, to resource.LocalConnectionSecretOwner, from resource.ConnectionSecretOwner) (propagated bool, err error) { + WithCompositeSyncer(CompositeSyncerFn(func(_ context.Context, _ *claim.Unstructured, _ *composite.Unstructured) error { return nil })), + WithConnectionPropagator(ConnectionPropagatorFn(func(_ context.Context, _ resource.LocalConnectionSecretOwner, _ resource.ConnectionSecretOwner) (propagated bool, err error) { return false, errBoom })), }, @@ -516,36 +498,122 @@ func TestReconcile(t *testing.T) { "SuccessfulReconcile": { reason: "We should not requeue if we successfully synced 
the composite resource and propagated its connection details", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + switch o := obj.(type) { + case *claim.Unstructured: + // We won't try to get an XR unless the claim + // references one. + o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + case *composite.Unstructured: + // Pretend the XR exists and is available. + o.SetCreationTimestamp(now) + o.SetClaimReference(&claim.Reference{}) + o.SetConditions(xpv1.Available()) + } + return nil + }), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. + cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + cm.SetConnectionDetailsLastPublishedTime(&now) + cm.SetConditions(xpv1.ReconcileSuccess()) + cm.SetConditions(xpv1.Available()) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - switch o := obj.(type) { - case *claim.Unstructured: - // We won't try to get an XR unless the claim - // references one. - o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - case *composite.Unstructured: - // Pretend the XR exists and is available. - o.SetCreationTimestamp(now) - o.SetClaimReference(&claim.Reference{}) - o.SetConditions(xpv1.Available()) - } - return nil - }), - MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { - // Check that we set our status condition. 
- cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) - cm.SetConnectionDetailsLastPublishedTime(&now) - cm.SetConditions(xpv1.ReconcileSuccess()) - cm.SetConditions(xpv1.Available()) - })), + WithClaimFinalizer(resource.FinalizerFns{ + AddFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), + WithCompositeSyncer(CompositeSyncerFn(func(_ context.Context, _ *claim.Unstructured, _ *composite.Unstructured) error { return nil })), + WithConnectionPropagator(ConnectionPropagatorFn(func(_ context.Context, _ resource.LocalConnectionSecretOwner, _ resource.ConnectionSecretOwner) (propagated bool, err error) { + return true, nil + })), + }, + }, + want: want{ + r: reconcile.Result{Requeue: false}, + }, + }, + "ClaimConditions": { + reason: "We should copy custom conditions from the XR if seen in the claimConditions array.", + args: args{ + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + switch o := obj.(type) { + case *claim.Unstructured: + // We won't try to get an XR unless the claim + // references one. + o.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + // The system conditions are already set. + o.SetConditions(xpv1.ReconcileSuccess()) + o.SetConditions(xpv1.Available()) + // Database was marked as creating in a prior reconciliation. + o.SetConditions(xpv1.Condition{ + Type: "DatabaseReady", + Status: corev1.ConditionFalse, + Reason: "Creating", + }) + case *composite.Unstructured: + // Pretend the XR exists and is available. + o.SetCreationTimestamp(now) + o.SetClaimReference(&claim.Reference{}) + o.SetConditions(xpv1.Available()) + o.SetConditions( + // Database has become ready. + xpv1.Condition{ + Type: "DatabaseReady", + Status: corev1.ConditionTrue, + Reason: "Available", + }, + // Bucket is a new condition. 
+ xpv1.Condition{ + Type: "BucketReady", + Status: corev1.ConditionFalse, + Reason: "Creating", + }, + // Internal condition should not be copied over as it is not in + // claimConditions. + xpv1.Condition{ + Type: "InternalSync", + Status: corev1.ConditionFalse, + Reason: "Syncing", + }, + ) + // Database and Bucket are claim conditions so they should be + // copied over. + o.SetClaimConditionTypes("DatabaseReady", "BucketReady") + } + return nil + }), + MockStatusUpdate: WantClaim(t, NewClaim(func(cm *claim.Unstructured) { + // Check that we set our status condition. + cm.SetResourceReference(&corev1.ObjectReference{Name: "cool-composite"}) + cm.SetConnectionDetailsLastPublishedTime(&now) + cm.SetConditions(xpv1.ReconcileSuccess()) + cm.SetConditions(xpv1.Available()) + cm.SetConditions( + // Database condition should have been updated to show ready. + xpv1.Condition{ + Type: "DatabaseReady", + Status: corev1.ConditionTrue, + Reason: "Available", + }, + // Bucket condition should have been created. 
+ xpv1.Condition{ + Type: "BucketReady", + Status: corev1.ConditionFalse, + Reason: "Creating", + }, + ) + })), + }, + opts: []ReconcilerOption{ WithClaimFinalizer(resource.FinalizerFns{ - AddFinalizerFn: func(ctx context.Context, obj resource.Object) error { return nil }, + AddFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), - WithCompositeSyncer(CompositeSyncerFn(func(ctx context.Context, cm *claim.Unstructured, xr *composite.Unstructured) error { return nil })), - WithConnectionPropagator(ConnectionPropagatorFn(func(ctx context.Context, to resource.LocalConnectionSecretOwner, from resource.ConnectionSecretOwner) (propagated bool, err error) { + WithCompositeSyncer(CompositeSyncerFn(func(_ context.Context, _ *claim.Unstructured, _ *composite.Unstructured) error { return nil })), + WithConnectionPropagator(ConnectionPropagatorFn(func(_ context.Context, _ resource.LocalConnectionSecretOwner, _ resource.ConnectionSecretOwner) (propagated bool, err error) { return true, nil })), }, @@ -558,7 +626,7 @@ func TestReconcile(t *testing.T) { for name, tc := range cases { t.Run(name, func(t *testing.T) { - r := NewReconciler(tc.args.mgr, tc.args.of, tc.args.with, tc.args.opts...) + r := NewReconciler(tc.args.client, tc.args.of, tc.args.with, tc.args.opts...) got, err := r.Reconcile(context.Background(), reconcile.Request{}) if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { @@ -582,8 +650,10 @@ func NewClaim(m ...ClaimModifier) *claim.Unstructured { } // A status update function that ensures the supplied object is the claim we want. 
-func WantClaim(t *testing.T, want *claim.Unstructured) func(ctx context.Context, obj client.Object, _ ...client.SubResourceUpdateOption) error { - return func(ctx context.Context, got client.Object, _ ...client.SubResourceUpdateOption) error { +func WantClaim(t *testing.T, want *claim.Unstructured) func(_ context.Context, obj client.Object, _ ...client.SubResourceUpdateOption) error { + t.Helper() + return func(_ context.Context, got client.Object, _ ...client.SubResourceUpdateOption) error { + t.Helper() // Normally we use a custom Equal method on conditions to ignore the // lastTransitionTime, but we're using unstructured types here where // the conditions are just a map[string]any. diff --git a/internal/controller/apiextensions/claim/syncer_csa.go b/internal/controller/apiextensions/claim/syncer_csa.go index e342c31ca..47bbf4f65 100644 --- a/internal/controller/apiextensions/claim/syncer_csa.go +++ b/internal/controller/apiextensions/claim/syncer_csa.go @@ -67,7 +67,7 @@ func NewClientSideCompositeSyncer(c client.Client, ng names.NameGenerator) *Clie // Sync the supplied claim with the supplied composite resource (XR). Syncing // may involve creating and binding the XR. -func (s *ClientSideCompositeSyncer) Sync(ctx context.Context, cm *claim.Unstructured, xr *composite.Unstructured) error { //nolint:gocyclo // This complex process seems easier to follow in one long method. +func (s *ClientSideCompositeSyncer) Sync(ctx context.Context, cm *claim.Unstructured, xr *composite.Unstructured) error { // First we sync claim -> XR. // It's possible we're being asked to configure a statically provisioned XR. 
diff --git a/internal/controller/apiextensions/claim/syncer_csa_test.go b/internal/controller/apiextensions/claim/syncer_csa_test.go index 5c88eadd4..863caff2a 100644 --- a/internal/controller/apiextensions/claim/syncer_csa_test.go +++ b/internal/controller/apiextensions/claim/syncer_csa_test.go @@ -88,7 +88,7 @@ func TestClientSideSync(t *testing.T) { "GenerateXRNameError": { reason: "We should return an error if we can't generate an XR name.", params: params{ - ng: names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { + ng: names.NameGeneratorFn(func(_ context.Context, _ resource.Object) error { return errBoom }), }, @@ -132,7 +132,7 @@ func TestClientSideSync(t *testing.T) { "UpdateClaimResourceRefError": { reason: "We should return an error if we can't update the claim to persist its resourceRef.", params: params{ - ng: names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { + ng: names.NameGeneratorFn(func(_ context.Context, cd resource.Object) error { cd.SetName("cool-claim-random") return nil }), @@ -184,7 +184,7 @@ func TestClientSideSync(t *testing.T) { "ApplyXRError": { reason: "We should return an error if we can't apply the XR.", params: params{ - ng: names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { + ng: names.NameGeneratorFn(func(_ context.Context, cd resource.Object) error { cd.SetName("cool-claim-random") return nil }), @@ -239,7 +239,7 @@ func TestClientSideSync(t *testing.T) { "UpdateClaimStatusError": { reason: "We should return an error if we can't update the claim's status.", params: params{ - ng: names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { + ng: names.NameGeneratorFn(func(_ context.Context, cd resource.Object) error { cd.SetName("cool-claim-random") return nil }), @@ -296,7 +296,7 @@ func TestClientSideSync(t *testing.T) { "XRDoesNotExist": { reason: "We should create, bind, and sync with an XR when none exists.", params: params{ - ng: 
names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { + ng: names.NameGeneratorFn(func(_ context.Context, cd resource.Object) error { cd.SetName("cool-claim-random") return nil }), diff --git a/internal/controller/apiextensions/claim/syncer_ssa.go b/internal/controller/apiextensions/claim/syncer_ssa.go index 6b0612997..2a36ea6cb 100644 --- a/internal/controller/apiextensions/claim/syncer_ssa.go +++ b/internal/controller/apiextensions/claim/syncer_ssa.go @@ -21,8 +21,6 @@ import ( "fmt" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/client-go/util/csaupgrade" "sigs.k8s.io/controller-runtime/pkg/client" xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" @@ -36,7 +34,7 @@ import ( "github.com/crossplane/crossplane/internal/xcrd" ) -// Error strings +// Error strings. const ( errCreatePatch = "cannot create patch" errPatchFieldManagers = "cannot patch field managers" @@ -54,7 +52,7 @@ const ( type NopManagedFieldsUpgrader struct{} // Upgrade does nothing. -func (u *NopManagedFieldsUpgrader) Upgrade(_ context.Context, _ client.Object, _ string, _ ...string) error { +func (u *NopManagedFieldsUpgrader) Upgrade(_ context.Context, _ client.Object, _ string) error { return nil } @@ -74,32 +72,76 @@ func NewPatchingManagedFieldsUpgrader(w client.Writer) *PatchingManagedFieldsUpg // Upgrade the supplied object's field managers from client-side to server-side // apply. -func (u *PatchingManagedFieldsUpgrader) Upgrade(ctx context.Context, obj client.Object, ssaManager string, csaManagers ...string) error { - // UpgradeManagedFieldsPatch removes or replaces the specified CSA managers. - // Unfortunately most Crossplane controllers use CSA manager "crossplane". - // So we could for example fight with the XR controller: - // - // 1. We remove CSA manager "crossplane", triggering XR controller watch - // 2. XR controller uses CSA manager "crossplane", triggering our watch - // 3. 
Back to step 1 :) - // - // In practice we only need to upgrade once, to ensure we don't share fields - // that only this controller has ever applied with "crossplane". We assume - // that if our SSA manager already exists, we've done the upgrade. - for _, e := range obj.GetManagedFields() { +// +// This is a multi-step process. +// +// Step 1: All fields are owned by either manager 'crossplane', operation +// 'Update' or manager 'apiextensions.crossplane.io/composite', operation +// 'Apply'. This represents all fields set by the claim or XR controller up to +// this point. +// +// Step 2: Upgrade is called for the first time. We delete all field managers. +// +// Step 3: The claim controller server-side applies its fully specified intent +// as field manager 'apiextensions.crossplane.io/claim'. This becomes the +// manager of all the fields that are part of the claim controller's fully +// specified intent. All existing fields the claim controller didn't specify +// become owned by a special manager - 'before-first-apply', operation 'Update'. +// +// Step 4: Upgrade is called for the second time. It deletes the +// 'before-first-apply' field manager entry. Only the claim field manager +// remains. +// +// Step 5: Eventually the XR reconciler updates a field (e.g. spec.resourceRefs) +// and becomes owner of that field. +func (u *PatchingManagedFieldsUpgrader) Upgrade(ctx context.Context, obj client.Object, ssaManager string) error { + // The XR doesn't exist, nothing to upgrade. 
+ if !meta.WasCreated(obj) { + return nil + } + + foundSSA := false + foundBFA := false + idxBFA := -1 + + for i, e := range obj.GetManagedFields() { if e.Manager == ssaManager { - return nil + foundSSA = true + } + if e.Manager == "before-first-apply" { + foundBFA = true + idxBFA = i } } - p, err := csaupgrade.UpgradeManagedFieldsPatch(obj, sets.New[string](csaManagers...), ssaManager) - if err != nil { - return errors.Wrap(err, errCreatePatch) - } - if p == nil { - // No patch means there's nothing to upgrade. + + switch { + // If our SSA field manager exists and the before-first-apply field manager + // doesn't, we've already done the upgrade. Don't do it again. + case foundSSA && !foundBFA: return nil + + // We found our SSA field manager but also before-first-apply. It should now + // be safe to delete before-first-apply. + case foundSSA && foundBFA: + p := []byte(fmt.Sprintf(`[ + {"op":"remove","path":"/metadata/managedFields/%d"}, + {"op":"replace","path":"/metadata/resourceVersion","value":"%s"} + ]`, idxBFA, obj.GetResourceVersion())) + return errors.Wrap(resource.IgnoreNotFound(u.client.Patch(ctx, obj, client.RawPatch(types.JSONPatchType, p))), "cannot remove before-first-apply from field managers") + + // We didn't find our SSA field manager or the before-first-apply field + // manager. This means we haven't started the upgrade. The first thing we + // want to do is clear all managed fields. After we do this we'll let our + // SSA field manager apply the fields it cares about. The result will be + // that our SSA field manager shares ownership with a new manager named + // 'before-first-apply'. 
+ default: + p := []byte(fmt.Sprintf(`[ + {"op":"replace","path": "/metadata/managedFields","value": [{}]}, + {"op":"replace","path":"/metadata/resourceVersion","value":"%s"} + ]`, obj.GetResourceVersion())) + return errors.Wrap(resource.IgnoreNotFound(u.client.Patch(ctx, obj, client.RawPatch(types.JSONPatchType, p))), "cannot clear field managers") } - return errors.Wrap(resource.IgnoreNotFound(u.client.Patch(ctx, obj, client.RawPatch(types.JSONPatchType, p))), errPatchFieldManagers) } // A ServerSideCompositeSyncer binds and syncs a claim with a composite resource @@ -117,7 +159,7 @@ func NewServerSideCompositeSyncer(c client.Client, ng names.NameGenerator) *Serv // Sync the supplied claim with the supplied composite resource (XR). Syncing // may involve creating and binding the XR. -func (s *ServerSideCompositeSyncer) Sync(ctx context.Context, cm *claim.Unstructured, xr *composite.Unstructured) error { //nolint:gocyclo // This complex process seems easier to follow in one long method. +func (s *ServerSideCompositeSyncer) Sync(ctx context.Context, cm *claim.Unstructured, xr *composite.Unstructured) error { // First we sync claim -> XR. // Create an empty XR patch object. 
We'll use this object to ensure we only diff --git a/internal/controller/apiextensions/claim/syncer_ssa_test.go b/internal/controller/apiextensions/claim/syncer_ssa_test.go index 31ea68736..1d67e9584 100644 --- a/internal/controller/apiextensions/claim/syncer_ssa_test.go +++ b/internal/controller/apiextensions/claim/syncer_ssa_test.go @@ -65,7 +65,7 @@ func TestServerSideSync(t *testing.T) { "GenerateXRNameError": { reason: "We should return an error if we can't generate an XR name.", params: params{ - ng: names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { + ng: names.NameGeneratorFn(func(_ context.Context, _ resource.Object) error { return errBoom }), }, @@ -88,7 +88,7 @@ func TestServerSideSync(t *testing.T) { "WeirdClaimSpec": { reason: "We should return an error if the claim spec is not an object.", params: params{ - ng: names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { + ng: names.NameGeneratorFn(func(_ context.Context, _ resource.Object) error { return nil }), }, @@ -117,7 +117,7 @@ func TestServerSideSync(t *testing.T) { // Fail to update the claim. MockUpdate: test.NewMockUpdateFn(errBoom), }, - ng: names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { + ng: names.NameGeneratorFn(func(_ context.Context, _ resource.Object) error { return nil }), }, @@ -170,7 +170,7 @@ func TestServerSideSync(t *testing.T) { // Fail to patch the XR. 
MockPatch: test.NewMockPatchFn(errBoom), }, - ng: names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { + ng: names.NameGeneratorFn(func(_ context.Context, _ resource.Object) error { return nil }), }, @@ -228,7 +228,7 @@ func TestServerSideSync(t *testing.T) { return nil }), }, - ng: names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { + ng: names.NameGeneratorFn(func(_ context.Context, _ resource.Object) error { return nil }), }, @@ -285,7 +285,7 @@ func TestServerSideSync(t *testing.T) { // Fail to update the claim's status. MockStatusUpdate: test.NewMockSubResourceUpdateFn(errBoom), }, - ng: names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { + ng: names.NameGeneratorFn(func(_ context.Context, _ resource.Object) error { return nil }), }, @@ -345,7 +345,7 @@ func TestServerSideSync(t *testing.T) { // Update the claim's status. MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), }, - ng: names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { + ng: names.NameGeneratorFn(func(_ context.Context, cd resource.Object) error { // Generate a name for the XR. cd.SetName("cool-claim-random") return nil diff --git a/internal/controller/apiextensions/composite/api.go b/internal/controller/apiextensions/composite/api.go index 248f83b30..ede13bd99 100644 --- a/internal/controller/apiextensions/composite/api.go +++ b/internal/controller/apiextensions/composite/api.go @@ -99,6 +99,8 @@ func (a *APIFilteredSecretPublisher) PublishConnection(ctx context.Context, o re resource.AllowUpdateIf(func(current, desired runtime.Object) bool { // We consider the update to be a no-op and don't allow it if the // current and existing secret data are identical. + + //nolint:forcetypeassert // These will always be secrets. 
return !cmp.Equal(current.(*corev1.Secret).Data, desired.(*corev1.Secret).Data, cmpopts.EquateEmpty()) }), ) diff --git a/internal/controller/apiextensions/composite/api_test.go b/internal/controller/apiextensions/composite/api_test.go index 850ddf3de..d8b3fbf6e 100644 --- a/internal/controller/apiextensions/composite/api_test.go +++ b/internal/controller/apiextensions/composite/api_test.go @@ -323,7 +323,7 @@ func TestFetchRevision(t *testing.T) { }), }, // This should not be called. - Applicator: resource.ApplyFn(func(c context.Context, o client.Object, ao ...resource.ApplyOption) error { return errBoom }), + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { return errBoom }), }, args: args{ cr: &fake.Composite{ @@ -359,7 +359,7 @@ func TestFetchRevision(t *testing.T) { return nil }), }, - Applicator: resource.ApplyFn(func(c context.Context, o client.Object, ao ...resource.ApplyOption) error { + Applicator: resource.ApplyFn(func(_ context.Context, o client.Object, _ ...resource.ApplyOption) error { // Ensure we were updated to reference the latest CompositionRevision. want := &fake.Composite{ CompositionReferencer: fake.CompositionReferencer{ @@ -416,7 +416,7 @@ func TestFetchRevision(t *testing.T) { return nil }), }, - Applicator: resource.ApplyFn(func(c context.Context, o client.Object, ao ...resource.ApplyOption) error { + Applicator: resource.ApplyFn(func(_ context.Context, o client.Object, _ ...resource.ApplyOption) error { // Ensure we were updated to reference the latest CompositionRevision. 
want := &fake.Composite{ CompositionReferencer: fake.CompositionReferencer{ @@ -474,7 +474,7 @@ func TestFetchRevision(t *testing.T) { return nil }), }, - Applicator: resource.ApplyFn(func(c context.Context, o client.Object, ao ...resource.ApplyOption) error { + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { return errBoom }), }, @@ -705,7 +705,8 @@ func TestSelectorResolver(t *testing.T) { } t.Errorf("wrong query") return nil - }}, + }, + }, cp: &fake.Composite{ CompositionSelector: fake.CompositionSelector{Sel: sel}, }, diff --git a/internal/controller/apiextensions/composite/composed.go b/internal/controller/apiextensions/composite/composed.go index 14b293a5b..8d89a5c75 100644 --- a/internal/controller/apiextensions/composite/composed.go +++ b/internal/controller/apiextensions/composite/composed.go @@ -35,8 +35,14 @@ type ComposedResource struct { ResourceName ResourceName // Ready indicates whether this composed resource is ready - i.e. whether - // all of its readiness checks passed. + // all of its readiness checks passed. Setting it to false will cause the + // XR to be marked as not ready. Ready bool + + // Synced indicates whether the composition process was able to sync the + // composed resource with its desired state. Setting it to false will cause + // the XR to be marked as not synced. 
+ Synced bool } // ComposedResourceState represents a composed resource (either desired or diff --git a/internal/controller/apiextensions/composite/composition_functions.go b/internal/controller/apiextensions/composite/composition_functions.go index c270858d5..c8f2880f5 100644 --- a/internal/controller/apiextensions/composite/composition_functions.go +++ b/internal/controller/apiextensions/composite/composition_functions.go @@ -19,8 +19,8 @@ import ( "context" "crypto/sha256" "fmt" - "reflect" "sort" + "strings" "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/types/known/structpb" @@ -33,6 +33,7 @@ import ( "k8s.io/apimachinery/pkg/util/json" "sigs.k8s.io/controller-runtime/pkg/client" + xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane-runtime/pkg/event" "github.com/crossplane/crossplane-runtime/pkg/meta" @@ -42,7 +43,8 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composed" "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" - "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1beta1" + fnv1 "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1" + v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" "github.com/crossplane/crossplane/internal/names" ) @@ -68,12 +70,13 @@ const ( errFmtApplyCD = "cannot apply composed resource %q" errFmtFetchCDConnectionDetails = "cannot fetch connection details for composed resource %q (a %s named %s)" errFmtUnmarshalPipelineStepInput = "cannot unmarshal input for Composition pipeline step %q" + errFmtGetCredentialsFromSecret = "cannot get Composition pipeline step %q credential %q from Secret" errFmtRunPipelineStep = "cannot run Composition pipeline step %q" + errFmtControllerMismatch = "refusing to delete composed resource %q that is controlled by %s %q" errFmtDeleteCD = "cannot delete composed resource %q (a %s 
named %s)" errFmtUnmarshalDesiredCD = "cannot unmarshal desired composed resource %q from RunFunctionResponse" errFmtCDAsStruct = "cannot encode composed resource %q to protocol buffer Struct well-known type" errFmtFatalResult = "pipeline step %q returned a fatal result: %s" - errFmtFunctionMaxIterations = "step %q requirements didn't stabilize after the maximum number of iterations (%d)" ) // Server-side-apply field owners. We need two of these because it's possible @@ -99,13 +102,6 @@ const ( FunctionContextKeyEnvironment = "apiextensions.crossplane.io/environment" ) -const ( - // MaxRequirementsIterations is the maximum number of times a Function should be called, - // limiting the number of times it can request for extra resources, capped for - // safety. - MaxRequirementsIterations = 5 -) - // A FunctionComposer supports composing resources using a pipeline of // Composition Functions. It ignores the P&T resources array. type FunctionComposer struct { @@ -120,19 +116,20 @@ type xr struct { ComposedResourceObserver ComposedResourceGarbageCollector ExtraResourcesFetcher + ManagedFieldsUpgrader } // A FunctionRunner runs a single Composition Function. type FunctionRunner interface { // RunFunction runs the named Composition Function. - RunFunction(ctx context.Context, name string, req *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error) + RunFunction(ctx context.Context, name string, req *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) } // A FunctionRunnerFn is a function that can run a Composition Function. -type FunctionRunnerFn func(ctx context.Context, name string, req *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error) +type FunctionRunnerFn func(ctx context.Context, name string, req *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) // RunFunction runs the named Composition Function with the supplied request. 
-func (fn FunctionRunnerFn) RunFunction(ctx context.Context, name string, req *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error) { +func (fn FunctionRunnerFn) RunFunction(ctx context.Context, name string, req *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) { return fn(ctx, name, req) } @@ -151,14 +148,14 @@ func (fn ComposedResourceObserverFn) ObserveComposedResources(ctx context.Contex // A ExtraResourcesFetcher gets extra resources matching a selector. type ExtraResourcesFetcher interface { - Fetch(ctx context.Context, rs *v1beta1.ResourceSelector) (*v1beta1.Resources, error) + Fetch(ctx context.Context, rs *fnv1.ResourceSelector) (*fnv1.Resources, error) } // An ExtraResourcesFetcherFn gets extra resources matching the selector. -type ExtraResourcesFetcherFn func(ctx context.Context, rs *v1beta1.ResourceSelector) (*v1beta1.Resources, error) +type ExtraResourcesFetcherFn func(ctx context.Context, rs *fnv1.ResourceSelector) (*fnv1.Resources, error) // Fetch gets extra resources matching the selector. -func (fn ExtraResourcesFetcherFn) Fetch(ctx context.Context, rs *v1beta1.ResourceSelector) (*v1beta1.Resources, error) { +func (fn ExtraResourcesFetcherFn) Fetch(ctx context.Context, rs *fnv1.ResourceSelector) (*fnv1.Resources, error) { return fn(ctx, rs) } @@ -178,6 +175,14 @@ func (fn ComposedResourceGarbageCollectorFn) GarbageCollectComposedResources(ctx return fn(ctx, owner, observed, desired) } +// A ManagedFieldsUpgrader upgrades an objects managed fields from client-side +// apply to server-side apply. This is necessary when an object was previously +// managed using client-side apply, but should now be managed using server-side +// apply. See https://github.com/kubernetes/kubernetes/issues/99003 for details. +type ManagedFieldsUpgrader interface { + Upgrade(ctx context.Context, obj client.Object) error +} + // A FunctionComposerOption is used to configure a FunctionComposer. 
type FunctionComposerOption func(*FunctionComposer) @@ -197,14 +202,6 @@ func WithComposedResourceObserver(g ComposedResourceObserver) FunctionComposerOp } } -// WithExtraResourcesFetcher configures how the FunctionComposer should fetch extra -// resources requested by functions. -func WithExtraResourcesFetcher(f ExtraResourcesFetcher) FunctionComposerOption { - return func(p *FunctionComposer) { - p.composite.ExtraResourcesFetcher = f - } -} - // WithComposedResourceGarbageCollector configures how the FunctionComposer should // garbage collect undesired composed resources. func WithComposedResourceGarbageCollector(d ComposedResourceGarbageCollector) FunctionComposerOption { @@ -213,13 +210,18 @@ func WithComposedResourceGarbageCollector(d ComposedResourceGarbageCollector) Fu } } +// WithManagedFieldsUpgrader configures how the FunctionComposer should upgrade +// composed resources managed fields from client-side apply to +// server-side apply. +func WithManagedFieldsUpgrader(u ManagedFieldsUpgrader) FunctionComposerOption { + return func(p *FunctionComposer) { + p.composite.ManagedFieldsUpgrader = u + } +} + // NewFunctionComposer returns a new Composer that supports composing resources using // both Patch and Transform (P&T) logic and a pipeline of Composition Functions. func NewFunctionComposer(kube client.Client, r FunctionRunner, o ...FunctionComposerOption) *FunctionComposer { - // TODO(negz): Can we avoid double-wrapping if the supplied client is - // already wrapped? Or just do away with unstructured.NewClient completely? 
- kube = unstructured.NewClient(kube) - f := NewSecretConnectionDetailsFetcher(kube) c := &FunctionComposer{ @@ -230,6 +232,7 @@ func NewFunctionComposer(kube client.Client, r FunctionRunner, o ...FunctionComp ComposedResourceObserver: NewExistingComposedResourceObserver(kube, f), ComposedResourceGarbageCollector: NewDeletingComposedResourceGarbageCollector(kube), NameGenerator: names.NewNameGenerator(kube), + ManagedFieldsUpgrader: NewPatchingManagedFieldsUpgrader(kube), }, pipeline: r, @@ -243,7 +246,7 @@ func NewFunctionComposer(kube client.Client, r FunctionRunner, o ...FunctionComp } // Compose resources using the Functions pipeline. -func (c *FunctionComposer) Compose(ctx context.Context, xr *composite.Unstructured, req CompositionRequest) (CompositionResult, error) { //nolint:gocyclo // We probably don't want any further abstraction for the sake of reduced complexity. +func (c *FunctionComposer) Compose(ctx context.Context, xr *composite.Unstructured, req CompositionRequest) (CompositionResult, error) { //nolint:gocognit // We probably don't want any further abstraction for the sake of reduced complexity. // Observe our existing composed resources. We need to do this before we // render any P&T templates, so that we can make sure we use the same // composed resource names (as in, metadata.name) every time. We know what @@ -271,9 +274,10 @@ func (c *FunctionComposer) Compose(ctx context.Context, xr *composite.Unstructur } // The Function pipeline starts with empty desired state. - d := &v1beta1.State{} + d := &fnv1.State{} - events := []event.Event{} + events := []TargetedEvent{} + conditions := []TargetedCondition{} // The Function context starts empty... fctx := &structpb.Struct{Fields: map[string]*structpb.Value{}} @@ -291,7 +295,7 @@ func (c *FunctionComposer) Compose(ctx context.Context, xr *composite.Unstructur // the desired state returned by the last, and each Function may produce // results that will be emitted as events. 
for _, fn := range req.Revision.Spec.Pipeline { - req := &v1beta1.RunFunctionRequest{Observed: o, Desired: d, Context: fctx} + req := &fnv1.RunFunctionRequest{Observed: o, Desired: d, Context: fctx} if fn.Input != nil { in := &structpb.Struct{} @@ -301,55 +305,31 @@ func (c *FunctionComposer) Compose(ctx context.Context, xr *composite.Unstructur req.Input = in } - // Used to store the requirements returned at the previous iteration. - var requirements *v1beta1.Requirements - // Used to store the response of the function at the previous iteration. - var rsp *v1beta1.RunFunctionResponse - - for i := int64(0); i <= MaxRequirementsIterations; i++ { - if i == MaxRequirementsIterations { - // The requirements didn't stabilize after the maximum number of iterations. - return CompositionResult{}, errors.Errorf(errFmtFunctionMaxIterations, fn.Step, MaxRequirementsIterations) - } - - // TODO(negz): Generate a content-addressable tag for this request. - // Perhaps using https://github.com/cerbos/protoc-gen-go-hashpb ? - rsp, err = c.pipeline.RunFunction(ctx, fn.FunctionRef.Name, req) - if err != nil { - return CompositionResult{}, errors.Wrapf(err, errFmtRunPipelineStep, fn.Step) - } - - if c.composite.ExtraResourcesFetcher == nil { - // If we don't have an extra resources getter, we don't need to - // iterate to satisfy the requirements. - break + req.Credentials = map[string]*fnv1.Credentials{} + for _, cs := range fn.Credentials { + // For now we only support loading credentials from secrets. + if cs.Source != v1.FunctionCredentialsSourceSecret || cs.SecretRef == nil { + continue } - newRequirements := rsp.GetRequirements() - if reflect.DeepEqual(newRequirements, requirements) { - // The requirements stabilized, the function is done. 
- break + s := &corev1.Secret{} + if err := c.client.Get(ctx, client.ObjectKey{Namespace: cs.SecretRef.Namespace, Name: cs.SecretRef.Name}, s); err != nil { + return CompositionResult{}, errors.Wrapf(err, errFmtGetCredentialsFromSecret, fn.Step, cs.Name) } - - // Store the requirements for the next iteration. - requirements = newRequirements - - // Cleanup the extra resources from the previous iteration to store the new ones - req.ExtraResources = make(map[string]*v1beta1.Resources) - - // Fetch the requested resources and add them to the desired state. - for name, selector := range newRequirements.GetExtraResources() { - resources, err := c.composite.ExtraResourcesFetcher.Fetch(ctx, selector) - if err != nil { - return CompositionResult{}, errors.Wrapf(err, "fetching resources for %s", name) - } - - // Resources would be nil in case of not found resources. - req.ExtraResources[name] = resources + req.Credentials[cs.Name] = &fnv1.Credentials{ + Source: &fnv1.Credentials_CredentialData{ + CredentialData: &fnv1.CredentialData{ + Data: s.Data, + }, + }, } + } - // Pass down the updated context across iterations. - req.Context = rsp.GetContext() + // TODO(negz): Generate a content-addressable tag for this request. + // Perhaps using https://github.com/cerbos/protoc-gen-go-hashpb ? + rsp, err := c.pipeline.RunFunction(ctx, fn.FunctionRef.Name, req) + if err != nil { + return CompositionResult{}, errors.Wrapf(err, errFmtRunPipelineStep, fn.Step) } // Pass the desired state returned by this Function to the next one. @@ -359,22 +339,58 @@ func (c *FunctionComposer) Compose(ctx context.Context, xr *composite.Unstructur // We intentionally discard/ignore this after the last Function runs. 
fctx = rsp.GetContext() + for _, c := range rsp.GetConditions() { + var status corev1.ConditionStatus + switch c.GetStatus() { + case fnv1.Status_STATUS_CONDITION_TRUE: + status = corev1.ConditionTrue + case fnv1.Status_STATUS_CONDITION_FALSE: + status = corev1.ConditionFalse + case fnv1.Status_STATUS_CONDITION_UNKNOWN, fnv1.Status_STATUS_CONDITION_UNSPECIFIED: + status = corev1.ConditionUnknown + } + + conditions = append(conditions, TargetedCondition{ + Condition: xpv1.Condition{ + Type: xpv1.ConditionType(c.GetType()), + Status: status, + LastTransitionTime: metav1.Now(), + Reason: xpv1.ConditionReason(c.GetReason()), + Message: c.GetMessage(), + }, + Target: convertTarget(c.GetTarget()), + }) + } + // Results of fatal severity stop the Composition process. Other results // are accumulated to be emitted as events by the Reconciler. for _, rs := range rsp.GetResults() { + reason := event.Reason(rs.GetReason()) + if reason == "" { + reason = reasonCompose + } + + e := TargetedEvent{Target: convertTarget(rs.GetTarget())} + switch rs.GetSeverity() { - case v1beta1.Severity_SEVERITY_FATAL: - return CompositionResult{}, errors.Errorf(errFmtFatalResult, fn.Step, rs.GetMessage()) - case v1beta1.Severity_SEVERITY_WARNING: - events = append(events, event.Warning(reasonCompose, errors.Errorf("Pipeline step %q: %s", fn.Step, rs.GetMessage()))) - case v1beta1.Severity_SEVERITY_NORMAL: - events = append(events, event.Normal(reasonCompose, fmt.Sprintf("Pipeline step %q: %s", fn.Step, rs.GetMessage()))) - case v1beta1.Severity_SEVERITY_UNSPECIFIED: + case fnv1.Severity_SEVERITY_FATAL: + return CompositionResult{Events: events, Conditions: conditions}, errors.Errorf(errFmtFatalResult, fn.Step, rs.GetMessage()) + case fnv1.Severity_SEVERITY_WARNING: + e.Event = event.Warning(reason, errors.New(rs.GetMessage())) + e.Detail = fmt.Sprintf("Pipeline step %q", fn.Step) + case fnv1.Severity_SEVERITY_NORMAL: + e.Event = event.Normal(reason, rs.GetMessage()) + e.Detail = 
fmt.Sprintf("Pipeline step %q", fn.Step) + case fnv1.Severity_SEVERITY_UNSPECIFIED: // We could hit this case if a Function was built against a newer // protobuf than this build of Crossplane, and the new protobuf // introduced a severity that we don't know about. - events = append(events, event.Warning(reasonCompose, errors.Errorf("Pipeline step %q returned a result of unknown severity (assuming warning): %s", fn.Step, rs.GetMessage()))) + e.Event = event.Warning(reason, errors.Errorf("Pipeline step %q returned a result of unknown severity (assuming warning): %s", fn.Step, rs.GetMessage())) + // Explicitly target only the XR, since we're including information + // about an exceptional, unexpected state. + e.Target = CompositionTargetComposite } + events = append(events, e) } } @@ -419,7 +435,7 @@ func (c *FunctionComposer) Compose(ctx context.Context, xr *composite.Unstructur desired[ResourceName(name)] = ComposedResourceState{ Resource: cd, ConnectionDetails: dr.GetConnectionDetails(), - Ready: dr.GetReady() == v1beta1.Ready_READY_TRUE, + Ready: dr.GetReady() == fnv1.Ready_READY_TRUE, } } @@ -454,28 +470,17 @@ func (c *FunctionComposer) Compose(ctx context.Context, xr *composite.Unstructur return CompositionResult{}, errors.Wrap(err, errApplyXRRefs) } - // Our goal here is to patch our XR's status using server-side apply. We - // want the resulting, patched object loaded into uxr. We need to pass in - // only our "fully specified intent" - i.e. only the fields that we actually - // care about. FromStruct will replace uxr's backing map[string]any with the - // content of GetResource (i.e. the desired status). We then need to set its - // GVK and name so that our client knows what resource to patch. 
- v := xr.GetAPIVersion() - k := xr.GetKind() - n := xr.GetName() - u := xr.GetUID() - if err := FromStruct(xr, d.GetComposite().GetResource()); err != nil { - return CompositionResult{}, errors.Wrap(err, errUnmarshalDesiredXRStatus) - } - xr.SetAPIVersion(v) - xr.SetKind(k) - xr.SetName(n) - xr.SetUID(u) - - // NOTE(phisco): Here we are fine using a hardcoded field owner as there is - // no risk of conflict between different XRs. - if err := c.client.Status().Patch(ctx, xr, client.Apply, client.ForceOwnership, client.FieldOwner(FieldOwnerXR)); err != nil { - return CompositionResult{}, errors.Wrap(err, errApplyXRStatus) + // TODO: Remove this call to Upgrade once no supported version of + // Crossplane have native P&T available. We only need to upgrade field managers if the + // native PTComposer might have applied the composed resources before, using the + // default client-side apply field manager "crossplane", + // but now migrated to use Composition functions, which uses server-side apply instead. + // Without this managedFields upgrade, the composed resources ends up having shared ownership + // of fields and field removals won't sync properly. + for _, cd := range observed { + if err := c.composite.ManagedFieldsUpgrader.Upgrade(ctx, cd.Resource); err != nil { + return CompositionResult{}, errors.Wrap(err, "cannot upgrade composed resource's managed fields from client-side to server-side apply") + } } // Produce our array of resources to return to the Reconciler. The @@ -495,13 +500,60 @@ func (c *FunctionComposer) Compose(ctx context.Context, xr *composite.Unstructur // this prevents multiple XRs composing the same resource to be // continuously alternated as controllers. 
if err := c.client.Patch(ctx, cd.Resource, client.Apply, client.ForceOwnership, client.FieldOwner(ComposedFieldOwnerName(xr))); err != nil { + if kerrors.IsInvalid(err) { + // We tried applying an invalid resource, we can't tell whether + // this means the resource will never be valid or it will if we + // run again the composition after some other resource is + // created or updated successfully. So, we emit a warning event + // and move on. + // We mark the resource as not synced, so that once we get to + // decide the XR's Synced condition, we can set it to false if + // any of the resources didn't sync successfully. + events = append(events, TargetedEvent{ + Event: event.Warning(reasonCompose, errors.Wrapf(err, errFmtApplyCD, name)), + Target: CompositionTargetComposite, + }) + // NOTE(phisco): here we behave differently w.r.t. the native + // p&t composer, as we respect the readiness reported by + // functions, while there we defaulted to also set ready false + // in case of apply errors. + resources = append(resources, ComposedResource{ResourceName: name, Ready: cd.Ready, Synced: false}) + continue + } return CompositionResult{}, errors.Wrapf(err, errFmtApplyCD, name) } - resources = append(resources, ComposedResource{ResourceName: name, Ready: cd.Ready}) + resources = append(resources, ComposedResource{ResourceName: name, Ready: cd.Ready, Synced: true}) + } + + // Our goal here is to patch our XR's status using server-side apply. We + // want the resulting, patched object loaded into uxr. We need to pass in + // only our "fully specified intent" - i.e. only the fields that we actually + // care about. FromStruct will replace uxr's backing map[string]any with the + // content of GetResource (i.e. the desired status). We then need to set its + // GVK and name so that our client knows what resource to patch. 
+ v := xr.GetAPIVersion() + k := xr.GetKind() + n := xr.GetName() + u := xr.GetUID() + if err := FromStruct(xr, d.GetComposite().GetResource()); err != nil { + return CompositionResult{}, errors.Wrap(err, errUnmarshalDesiredXRStatus) + } + xr.SetAPIVersion(v) + xr.SetKind(k) + xr.SetName(n) + xr.SetUID(u) + + // NOTE(phisco): Here we are fine using a hardcoded field owner as there is + // no risk of conflict between different XRs. + if err := c.client.Status().Patch(ctx, xr, client.Apply, client.ForceOwnership, client.FieldOwner(FieldOwnerXR)); err != nil { + // Note(phisco): here we are fine with this error being terminal, as + // there is no other resource to apply that might eventually resolve + // this issue. + return CompositionResult{}, errors.Wrap(err, errApplyXRStatus) } - return CompositionResult{ConnectionDetails: d.GetComposite().GetConnectionDetails(), Composed: resources, Events: events}, nil + return CompositionResult{ConnectionDetails: d.GetComposite().GetConnectionDetails(), Composed: resources, Events: events, Conditions: conditions}, nil } // ComposedFieldOwnerName generates a unique field owner name @@ -527,68 +579,6 @@ func ComposedFieldOwnerName(xr *composite.Unstructured) string { return fmt.Sprintf("%s/%x", FieldOwnerComposedPrefix, h.Sum(nil)) } -// ExistingExtraResourcesFetcher fetches extra resources requested by -// functions using the provided client.Reader. -type ExistingExtraResourcesFetcher struct { - client client.Reader -} - -// NewExistingExtraResourcesFetcher returns a new ExistingExtraResourcesFetcher. -func NewExistingExtraResourcesFetcher(c client.Reader) *ExistingExtraResourcesFetcher { - return &ExistingExtraResourcesFetcher{client: c} -} - -// Fetch fetches resources requested by functions using the provided client.Reader. 
-func (e *ExistingExtraResourcesFetcher) Fetch(ctx context.Context, rs *v1beta1.ResourceSelector) (*v1beta1.Resources, error) { - if rs == nil { - return nil, errors.New(errNilResourceSelector) - } - switch match := rs.GetMatch().(type) { - case *v1beta1.ResourceSelector_MatchName: - // Fetch a single resource. - r := &kunstructured.Unstructured{} - r.SetAPIVersion(rs.GetApiVersion()) - r.SetKind(rs.GetKind()) - nn := types.NamespacedName{Name: rs.GetMatchName()} - err := e.client.Get(ctx, nn, r) - if kerrors.IsNotFound(err) { - // The resource doesn't exist. We'll return nil, which the Functions - // know means that the resource was not found. - return nil, nil - } - if err != nil { - return nil, errors.Wrap(err, errGetExtraResourceByName) - } - o, err := AsStruct(r) - if err != nil { - return nil, errors.Wrap(err, errExtraResourceAsStruct) - } - return &v1beta1.Resources{Items: []*v1beta1.Resource{{Resource: o}}}, nil - case *v1beta1.ResourceSelector_MatchLabels: - // Fetch a list of resources. - list := &kunstructured.UnstructuredList{} - list.SetAPIVersion(rs.GetApiVersion()) - list.SetKind(rs.GetKind()) - - if err := e.client.List(ctx, list, client.MatchingLabels(match.MatchLabels.GetLabels())); err != nil { - return nil, errors.Wrap(err, errListExtraResources) - } - - resources := make([]*v1beta1.Resource, len(list.Items)) - for i, r := range list.Items { - r := r - o, err := AsStruct(&r) - if err != nil { - return nil, errors.Wrap(err, errExtraResourceAsStruct) - } - resources[i] = &v1beta1.Resource{Resource: o} - } - - return &v1beta1.Resources{Items: resources}, nil - } - return nil, errors.New(errUnknownResourceSelector) -} - // An ExistingComposedResourceObserver uses an XR's resource references to load // any existing composed resources from the API server. It also loads their // connection details. @@ -660,25 +650,25 @@ func (g *ExistingComposedResourceObserver) ObserveComposedResources(ctx context. 
// AsState builds state for a RunFunctionRequest from the XR and composed // resources. -func AsState(xr resource.Composite, xc managed.ConnectionDetails, rs ComposedResourceStates) (*v1beta1.State, error) { +func AsState(xr resource.Composite, xc managed.ConnectionDetails, rs ComposedResourceStates) (*fnv1.State, error) { r, err := AsStruct(xr) if err != nil { return nil, errors.Wrap(err, errXRAsStruct) } - oxr := &v1beta1.Resource{Resource: r, ConnectionDetails: xc} + oxr := &fnv1.Resource{Resource: r, ConnectionDetails: xc} - ocds := make(map[string]*v1beta1.Resource) + ocds := make(map[string]*fnv1.Resource) for name, or := range rs { r, err := AsStruct(or.Resource) if err != nil { return nil, errors.Wrapf(err, errFmtCDAsStruct, name) } - ocds[string(name)] = &v1beta1.Resource{Resource: r, ConnectionDetails: or.ConnectionDetails} + ocds[string(name)] = &fnv1.Resource{Resource: r, ConnectionDetails: or.ConnectionDetails} } - return &v1beta1.State{Composite: oxr, Resources: ocds}, nil + return &fnv1.State{Composite: oxr, Resources: ocds}, nil } // AsStruct converts the supplied object to a protocol buffer Struct well-known @@ -753,9 +743,17 @@ func (d *DeletingComposedResourceGarbageCollector) GarbageCollectComposedResourc } for name, cd := range del { - // We want to garbage collect this resource, but we don't control it. - if c := metav1.GetControllerOf(cd.Resource); c == nil || c.UID != owner.GetUID() { - continue + // Don't garbage collect composed resources that someone else controls. + // + // We do garbage collect composed resources that no-one controls. If a + // composed resource appears in observed (i.e. appears in the XR's + // spec.resourceRefs) but doesn't have a controller ref, most likely we + // created it but its controller ref was stripped. In this situation it + // would be permissible for us to adopt the composed resource by setting + // our XR as the controller ref, then delete it. So we may as well just + // go straight to deleting it. 
+ if c := metav1.GetControllerOf(cd.Resource); c != nil && c.UID != owner.GetUID() { + return errors.Errorf(errFmtControllerMismatch, name, c.Kind, c.Name) } if err := d.client.Delete(ctx, cd.Resource); resource.IgnoreNotFound(err) != nil { @@ -783,3 +781,92 @@ func UpdateResourceRefs(xr resource.ComposedResourcesReferencer, desired Compose xr.SetResourceReferences(refs) } + +// A PatchingManagedFieldsUpgrader uses a JSON patch to upgrade an object's +// managed fields from client-side to server-side apply. The upgrade is a no-op +// if the object does not need upgrading. +type PatchingManagedFieldsUpgrader struct { + client client.Writer +} + +// NewPatchingManagedFieldsUpgrader returns a ManagedFieldsUpgrader that uses a +// JSON patch to upgrade and object's managed fields from client-side to +// server-side apply. +func NewPatchingManagedFieldsUpgrader(w client.Writer) *PatchingManagedFieldsUpgrader { + return &PatchingManagedFieldsUpgrader{client: w} +} + +// Upgrade the supplied composed object's field managers from client-side to server-side +// apply. +// +// This is a multi-step process. +// +// Step 1: All fields are owned by manager 'crossplane' operation 'Update'. This +// represents all fields set by the XR controller up to this point. +// +// Step 2: Upgrade is called for the first time. We clear all field managers. +// +// Step 3: The XR controller server-side applies its fully specified intent +// as field manager with prefix 'apiextensions.crossplane.io/composed/'. This becomes the +// manager of all the fields that are part of the XR controller's fully +// specified intent. All existing fields the XR controller didn't specify +// become owned by a special manager - 'before-first-apply', operation 'Update'. +// +// Step 4: Upgrade is called for the second time. It deletes the +// 'before-first-apply' field manager entry. Only the XR composed field manager +// remains. 
+func (u *PatchingManagedFieldsUpgrader) Upgrade(ctx context.Context, obj client.Object) error { + // The composed resource doesn't exist, nothing to upgrade. + if !meta.WasCreated(obj) { + return nil + } + + foundSSA := false + foundBFA := false + idxBFA := -1 + + for i, e := range obj.GetManagedFields() { + if strings.HasPrefix(e.Manager, FieldOwnerComposedPrefix) { + foundSSA = true + } + if e.Manager == "before-first-apply" { + foundBFA = true + idxBFA = i + } + } + + switch { + // If our SSA field manager exists and the before-first-apply field manager + // doesn't, we've already done the upgrade. Don't do it again. + case foundSSA && !foundBFA: + return nil + + // We found our SSA field manager but also before-first-apply. It should now + // be safe to delete before-first-apply. + case foundSSA && foundBFA: + p := []byte(fmt.Sprintf(`[ + {"op": "remove", "path": "/metadata/managedFields/%d"}, + {"op": "replace", "path": "/metadata/resourceVersion", "value": "%s"} + ]`, idxBFA, obj.GetResourceVersion())) + return errors.Wrap(resource.IgnoreNotFound(u.client.Patch(ctx, obj, client.RawPatch(types.JSONPatchType, p))), "cannot remove before-first-apply from field managers") + + // We didn't find our SSA field manager. This means we haven't started the + // upgrade. The first thing we want to do is clear all managed fields. + // After we do this we'll let our SSA field manager apply the fields it + // cares about. The result will be that our SSA field manager shares + // ownership with a new manager named 'before-first-apply'. 
+ default: + p := []byte(fmt.Sprintf(`[ + {"op": "replace", "path": "/metadata/managedFields", "value": [{}]}, + {"op": "replace", "path": "/metadata/resourceVersion", "value": "%s"} + ]`, obj.GetResourceVersion())) + return errors.Wrap(resource.IgnoreNotFound(u.client.Patch(ctx, obj, client.RawPatch(types.JSONPatchType, p))), "cannot clear field managers") + } +} + +func convertTarget(t fnv1.Target) CompositionTarget { + if t == fnv1.Target_TARGET_COMPOSITE_AND_CLAIM { + return CompositionTargetCompositeAndClaim + } + return CompositionTargetComposite +} diff --git a/internal/controller/apiextensions/composite/composition_functions_test.go b/internal/controller/apiextensions/composite/composition_functions_test.go index 513f2e399..cb026d93d 100644 --- a/internal/controller/apiextensions/composite/composition_functions_test.go +++ b/internal/controller/apiextensions/composite/composition_functions_test.go @@ -28,13 +28,13 @@ import ( corev1 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" + xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane-runtime/pkg/event" "github.com/crossplane/crossplane-runtime/pkg/meta" @@ -45,7 +45,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" "github.com/crossplane/crossplane-runtime/pkg/test" - "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1beta1" + fnv1 "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1" v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" "github.com/crossplane/crossplane/internal/xcrd" ) @@ -80,10 +80,10 @@ func TestFunctionCompose(t 
*testing.T) { reason: "We should return any error encountered while fetching the XR's connection details.", params: params{ o: []FunctionComposerOption{ - WithComposedResourceObserver(ComposedResourceObserverFn(func(ctx context.Context, xr resource.Composite) (ComposedResourceStates, error) { + WithComposedResourceObserver(ComposedResourceObserverFn(func(_ context.Context, _ resource.Composite) (ComposedResourceStates, error) { return ComposedResourceStates{}, nil })), - WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, errBoom })), }, @@ -100,10 +100,10 @@ func TestFunctionCompose(t *testing.T) { reason: "We should return any error encountered while getting the XR's existing composed resources.", params: params{ o: []FunctionComposerOption{ - WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, nil })), - WithComposedResourceObserver(ComposedResourceObserverFn(func(ctx context.Context, xr resource.Composite) (ComposedResourceStates, error) { + WithComposedResourceObserver(ComposedResourceObserverFn(func(_ context.Context, _ resource.Composite) (ComposedResourceStates, error) { return nil, errBoom })), }, @@ -120,10 +120,10 @@ func TestFunctionCompose(t *testing.T) { reason: "We should return any error encountered while unmarshalling a Composition Function input", params: params{ o: []FunctionComposerOption{ - WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o 
resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, nil })), - WithComposedResourceObserver(ComposedResourceObserverFn(func(ctx context.Context, xr resource.Composite) (ComposedResourceStates, error) { + WithComposedResourceObserver(ComposedResourceObserverFn(func(_ context.Context, _ resource.Composite) (ComposedResourceStates, error) { return nil, nil })), }, @@ -148,17 +148,62 @@ func TestFunctionCompose(t *testing.T) { err: errors.Wrapf(errProtoSyntax, errFmtUnmarshalPipelineStepInput, "run-cool-function"), }, }, + "GetCredentialsSecretError": { + reason: "We should return any error encountered while getting the credentials secret for a Composition Function", + params: params{ + kube: &test.MockClient{ + // Return an error when we try to get the secret. + MockGet: test.NewMockGetFn(errBoom), + }, + o: []FunctionComposerOption{ + WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + return nil, nil + })), + WithComposedResourceObserver(ComposedResourceObserverFn(func(_ context.Context, _ resource.Composite) (ComposedResourceStates, error) { + return nil, nil + })), + }, + }, + args: args{ + xr: composite.New(), + req: CompositionRequest{ + Revision: &v1.CompositionRevision{ + Spec: v1.CompositionRevisionSpec{ + Pipeline: []v1.PipelineStep{ + { + Step: "run-cool-function", + FunctionRef: v1.FunctionReference{Name: "cool-function"}, + Credentials: []v1.FunctionCredentials{ + { + Name: "cool-secret", + Source: v1.FunctionCredentialsSourceSecret, + SecretRef: &xpv1.SecretReference{ + Namespace: "default", + Name: "cool-secret", + }, + }, + }, + }, + }, + }, + }, + }, + }, + want: want{ + err: errors.Wrapf(errBoom, errFmtGetCredentialsFromSecret, "run-cool-function", 
"cool-secret"), + }, + }, "RunFunctionError": { reason: "We should return any error encountered while running a Composition Function", params: params{ - r: FunctionRunnerFn(func(ctx context.Context, name string, req *v1beta1.RunFunctionRequest) (rsp *v1beta1.RunFunctionResponse, err error) { + r: FunctionRunnerFn(func(_ context.Context, _ string, _ *fnv1.RunFunctionRequest) (rsp *fnv1.RunFunctionResponse, err error) { return nil, errBoom }), o: []FunctionComposerOption{ - WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, nil })), - WithComposedResourceObserver(ComposedResourceObserverFn(func(ctx context.Context, xr resource.Composite) (ComposedResourceStates, error) { + WithComposedResourceObserver(ComposedResourceObserverFn(func(_ context.Context, _ resource.Composite) (ComposedResourceStates, error) { return nil, nil })), }, @@ -183,20 +228,63 @@ func TestFunctionCompose(t *testing.T) { }, }, "FatalFunctionResultError": { - reason: "We should return any fatal function results as an error", + reason: "We should return any fatal function results as an error. Any conditions returned by the function should be passed up. 
Any results returned by the function prior to the fatal result should be passed up.", params: params{ - r: FunctionRunnerFn(func(ctx context.Context, name string, req *v1beta1.RunFunctionRequest) (rsp *v1beta1.RunFunctionResponse, err error) { - r := &v1beta1.Result{ - Severity: v1beta1.Severity_SEVERITY_FATAL, - Message: "oh no", - } - return &v1beta1.RunFunctionResponse{Results: []*v1beta1.Result{r}}, nil + r: FunctionRunnerFn(func(_ context.Context, _ string, _ *fnv1.RunFunctionRequest) (rsp *fnv1.RunFunctionResponse, err error) { + return &fnv1.RunFunctionResponse{ + Results: []*fnv1.Result{ + // This result should be passed up as it was sent before the fatal + // result. The reason should be defaulted. The target should be + // defaulted. + { + Severity: fnv1.Severity_SEVERITY_NORMAL, + Message: "A result before the fatal result with the default Reason.", + }, + // This result should be passed up as it was sent before the fatal + // result. The reason should be kept. The target should be kept. + { + Severity: fnv1.Severity_SEVERITY_NORMAL, + Reason: ptr.To("SomeReason"), + Message: "A result before the fatal result with a specific Reason.", + Target: fnv1.Target_TARGET_COMPOSITE_AND_CLAIM.Enum(), + }, + // The fatal result + { + Severity: fnv1.Severity_SEVERITY_FATAL, + Message: "oh no", + }, + // This result should not be passed up as it was sent after the + // fatal result. + { + Severity: fnv1.Severity_SEVERITY_NORMAL, + Message: "a result after the fatal result", + }, + }, + Conditions: []*fnv1.Condition{ + // A condition returned by the function with only the minimum + // necessary values. + { + Type: "DatabaseReady", + Status: fnv1.Status_STATUS_CONDITION_FALSE, + Reason: "Creating", + }, + // A condition returned by the function with all optional values + // given. 
+ { + Type: "DeploymentReady", + Status: fnv1.Status_STATUS_CONDITION_TRUE, + Reason: "Available", + Message: ptr.To("The deployment is ready."), + Target: fnv1.Target_TARGET_COMPOSITE_AND_CLAIM.Enum(), + }, + }, + }, nil }), o: []FunctionComposerOption{ - WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, nil })), - WithComposedResourceObserver(ComposedResourceObserverFn(func(ctx context.Context, xr resource.Composite) (ComposedResourceStates, error) { + WithComposedResourceObserver(ComposedResourceObserverFn(func(_ context.Context, _ resource.Composite) (ComposedResourceStates, error) { return nil, nil })), }, @@ -218,14 +306,59 @@ func TestFunctionCompose(t *testing.T) { }, want: want{ err: errors.Errorf(errFmtFatalResult, "run-cool-function", "oh no"), + res: CompositionResult{ + Events: []TargetedEvent{ + // The event with minimum values. + { + Event: event.Event{ + Type: "Normal", + Reason: "ComposeResources", + Message: "A result before the fatal result with the default Reason.", + }, + Detail: "Pipeline step \"run-cool-function\"", + Target: CompositionTargetComposite, + }, + // The event that provides all possible values. + { + Event: event.Event{ + Type: "Normal", + Reason: "SomeReason", + Message: "A result before the fatal result with a specific Reason.", + }, + Detail: "Pipeline step \"run-cool-function\"", + Target: CompositionTargetCompositeAndClaim, + }, + }, + Conditions: []TargetedCondition{ + // The condition with minimum values. + { + Condition: xpv1.Condition{ + Type: "DatabaseReady", + Status: "False", + Reason: "Creating", + }, + Target: CompositionTargetComposite, + }, + // The condition that provides all possible values. 
+ { + Condition: xpv1.Condition{ + Type: "DeploymentReady", + Status: "True", + Reason: "Available", + Message: "The deployment is ready.", + }, + Target: CompositionTargetCompositeAndClaim, + }, + }, + }, }, }, "RenderComposedResourceMetadataError": { reason: "We should return any error we encounter when rendering composed resource metadata", params: params{ - r: FunctionRunnerFn(func(ctx context.Context, name string, req *v1beta1.RunFunctionRequest) (rsp *v1beta1.RunFunctionResponse, err error) { - d := &v1beta1.State{ - Resources: map[string]*v1beta1.Resource{ + r: FunctionRunnerFn(func(_ context.Context, _ string, _ *fnv1.RunFunctionRequest) (rsp *fnv1.RunFunctionResponse, err error) { + d := &fnv1.State{ + Resources: map[string]*fnv1.Resource{ "cool-resource": { Resource: MustStruct(map[string]any{ "apiVersion": "test.crossplane.io/v1", @@ -234,13 +367,13 @@ func TestFunctionCompose(t *testing.T) { }, }, } - return &v1beta1.RunFunctionResponse{Desired: d}, nil + return &fnv1.RunFunctionResponse{Desired: d}, nil }), o: []FunctionComposerOption{ - WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, nil })), - WithComposedResourceObserver(ComposedResourceObserverFn(func(ctx context.Context, xr resource.Composite) (ComposedResourceStates, error) { + WithComposedResourceObserver(ComposedResourceObserverFn(func(_ context.Context, _ resource.Composite) (ComposedResourceStates, error) { return nil, nil })), }, @@ -271,9 +404,9 @@ func TestFunctionCompose(t *testing.T) { kube: &test.MockClient{ MockGet: test.NewMockGetFn(errBoom), }, - r: FunctionRunnerFn(func(ctx context.Context, name string, req *v1beta1.RunFunctionRequest) (rsp *v1beta1.RunFunctionResponse, err error) { - d := 
&v1beta1.State{ - Resources: map[string]*v1beta1.Resource{ + r: FunctionRunnerFn(func(_ context.Context, _ string, _ *fnv1.RunFunctionRequest) (rsp *fnv1.RunFunctionResponse, err error) { + d := &fnv1.State{ + Resources: map[string]*fnv1.Resource{ "cool-resource": { Resource: MustStruct(map[string]any{ "apiVersion": "test.crossplane.io/v1", @@ -284,13 +417,13 @@ func TestFunctionCompose(t *testing.T) { }, }, } - return &v1beta1.RunFunctionResponse{Desired: d}, nil + return &fnv1.RunFunctionResponse{Desired: d}, nil }), o: []FunctionComposerOption{ - WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, nil })), - WithComposedResourceObserver(ComposedResourceObserverFn(func(ctx context.Context, xr resource.Composite) (ComposedResourceStates, error) { + WithComposedResourceObserver(ComposedResourceObserverFn(func(_ context.Context, _ resource.Composite) (ComposedResourceStates, error) { return nil, nil })), }, @@ -320,17 +453,17 @@ func TestFunctionCompose(t *testing.T) { kube: &test.MockClient{ MockPatch: test.NewMockPatchFn(nil), }, - r: FunctionRunnerFn(func(ctx context.Context, name string, req *v1beta1.RunFunctionRequest) (rsp *v1beta1.RunFunctionResponse, err error) { - return &v1beta1.RunFunctionResponse{}, nil + r: FunctionRunnerFn(func(_ context.Context, _ string, _ *fnv1.RunFunctionRequest) (rsp *fnv1.RunFunctionResponse, err error) { + return &fnv1.RunFunctionResponse{}, nil }), o: []FunctionComposerOption{ - WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ 
resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, nil })), - WithComposedResourceObserver(ComposedResourceObserverFn(func(ctx context.Context, xr resource.Composite) (ComposedResourceStates, error) { + WithComposedResourceObserver(ComposedResourceObserverFn(func(_ context.Context, _ resource.Composite) (ComposedResourceStates, error) { return nil, nil })), - WithComposedResourceGarbageCollector(ComposedResourceGarbageCollectorFn(func(ctx context.Context, owner metav1.Object, observed, desired ComposedResourceStates) error { + WithComposedResourceGarbageCollector(ComposedResourceGarbageCollectorFn(func(_ context.Context, _ metav1.Object, _, _ ComposedResourceStates) error { return errBoom })), }, @@ -360,24 +493,25 @@ func TestFunctionCompose(t *testing.T) { kube: &test.MockClient{ MockPatch: test.NewMockPatchFn(nil, func(obj client.Object) error { // We only want to return an error for the XR. - u := obj.(*kunstructured.Unstructured) - if u.GetKind() == "CoolComposed" { - return nil + switch obj.(type) { + case *composite.Unstructured: + return errBoom + default: } - return errBoom + return nil }), }, - r: FunctionRunnerFn(func(ctx context.Context, name string, req *v1beta1.RunFunctionRequest) (rsp *v1beta1.RunFunctionResponse, err error) { - return &v1beta1.RunFunctionResponse{}, nil + r: FunctionRunnerFn(func(_ context.Context, _ string, _ *fnv1.RunFunctionRequest) (rsp *fnv1.RunFunctionResponse, err error) { + return &fnv1.RunFunctionResponse{}, nil }), o: []FunctionComposerOption{ - WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, nil })), - WithComposedResourceObserver(ComposedResourceObserverFn(func(ctx context.Context, xr resource.Composite) 
(ComposedResourceStates, error) { + WithComposedResourceObserver(ComposedResourceObserverFn(func(_ context.Context, _ resource.Composite) (ComposedResourceStates, error) { return nil, nil })), - WithComposedResourceGarbageCollector(ComposedResourceGarbageCollectorFn(func(ctx context.Context, owner metav1.Object, observed, desired ComposedResourceStates) error { + WithComposedResourceGarbageCollector(ComposedResourceGarbageCollectorFn(func(_ context.Context, _ metav1.Object, _, _ ComposedResourceStates) error { return nil })), }, @@ -408,25 +542,26 @@ func TestFunctionCompose(t *testing.T) { MockPatch: test.NewMockPatchFn(nil), MockStatusPatch: test.NewMockSubResourcePatchFn(errBoom), }, - r: FunctionRunnerFn(func(ctx context.Context, name string, req *v1beta1.RunFunctionRequest) (rsp *v1beta1.RunFunctionResponse, err error) { - d := &v1beta1.State{ - Composite: &v1beta1.Resource{ + r: FunctionRunnerFn(func(_ context.Context, _ string, _ *fnv1.RunFunctionRequest) (rsp *fnv1.RunFunctionResponse, err error) { + d := &fnv1.State{ + Composite: &fnv1.Resource{ Resource: MustStruct(map[string]any{ "status": map[string]any{ "widgets": 42, }, - })}, + }), + }, } - return &v1beta1.RunFunctionResponse{Desired: d}, nil + return &fnv1.RunFunctionResponse{Desired: d}, nil }), o: []FunctionComposerOption{ - WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, nil })), - WithComposedResourceObserver(ComposedResourceObserverFn(func(ctx context.Context, xr resource.Composite) (ComposedResourceStates, error) { + WithComposedResourceObserver(ComposedResourceObserverFn(func(_ context.Context, _ resource.Composite) (ComposedResourceStates, error) { return nil, nil })), - 
WithComposedResourceGarbageCollector(ComposedResourceGarbageCollectorFn(func(ctx context.Context, owner metav1.Object, observed, desired ComposedResourceStates) error { + WithComposedResourceGarbageCollector(ComposedResourceGarbageCollectorFn(func(_ context.Context, _ metav1.Object, _, _ ComposedResourceStates) error { return nil })), }, @@ -458,17 +593,18 @@ func TestFunctionCompose(t *testing.T) { MockPatch: test.NewMockPatchFn(nil, func(obj client.Object) error { // We only want to return an error if we're patching a // composed resource. - u := obj.(*kunstructured.Unstructured) - if u.GetKind() == "UncoolComposed" { + switch obj.(type) { + case *composed.Unstructured: return errBoom + default: } return nil }), MockStatusPatch: test.NewMockSubResourcePatchFn(nil), }, - r: FunctionRunnerFn(func(ctx context.Context, name string, req *v1beta1.RunFunctionRequest) (rsp *v1beta1.RunFunctionResponse, err error) { - d := &v1beta1.State{ - Resources: map[string]*v1beta1.Resource{ + r: FunctionRunnerFn(func(_ context.Context, _ string, _ *fnv1.RunFunctionRequest) (rsp *fnv1.RunFunctionResponse, err error) { + d := &fnv1.State{ + Resources: map[string]*fnv1.Resource{ "uncool-resource": { Resource: MustStruct(map[string]any{ "apiVersion": "test.crossplane.io/v1", @@ -477,16 +613,16 @@ func TestFunctionCompose(t *testing.T) { }, }, } - return &v1beta1.RunFunctionResponse{Desired: d}, nil + return &fnv1.RunFunctionResponse{Desired: d}, nil }), o: []FunctionComposerOption{ - WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, nil })), - WithComposedResourceObserver(ComposedResourceObserverFn(func(ctx context.Context, xr resource.Composite) (ComposedResourceStates, error) { + 
WithComposedResourceObserver(ComposedResourceObserverFn(func(_ context.Context, _ resource.Composite) (ComposedResourceStates, error) { return nil, nil })), - WithComposedResourceGarbageCollector(ComposedResourceGarbageCollectorFn(func(ctx context.Context, owner metav1.Object, observed, desired ComposedResourceStates) error { + WithComposedResourceGarbageCollector(ComposedResourceGarbageCollectorFn(func(_ context.Context, _ metav1.Object, _, _ ComposedResourceStates) error { return nil })), }, @@ -514,14 +650,27 @@ func TestFunctionCompose(t *testing.T) { reason: "We should return a valid CompositionResult when a 'pure Function' (i.e. patch-and-transform-less) reconcile succeeds", params: params{ kube: &test.MockClient{ - MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{Resource: "UncoolComposed"}, "")), // all names are available + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + if s, ok := obj.(*corev1.Secret); ok { + s.Data = map[string][]byte{ + "secret": []byte("password"), + } + return nil + } + + // If this isn't a secret, it's a composed resource. + // Return not found to indicate its name is available. + // TODO(negz): This is "testing through" to the + // names.NameGenerator implementation. Mock it out. 
+ return kerrors.NewNotFound(schema.GroupResource{}, "") + }), MockPatch: test.NewMockPatchFn(nil), MockStatusPatch: test.NewMockSubResourcePatchFn(nil), }, - r: FunctionRunnerFn(func(ctx context.Context, name string, req *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error) { - rsp := &v1beta1.RunFunctionResponse{ - Desired: &v1beta1.State{ - Composite: &v1beta1.Resource{ + r: FunctionRunnerFn(func(_ context.Context, _ string, _ *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) { + rsp := &fnv1.RunFunctionResponse{ + Desired: &fnv1.State{ + Composite: &fnv1.Resource{ Resource: MustStruct(map[string]any{ "status": map[string]any{ "widgets": 42, @@ -529,13 +678,13 @@ func TestFunctionCompose(t *testing.T) { }), ConnectionDetails: map[string][]byte{"from": []byte("function-pipeline")}, }, - Resources: map[string]*v1beta1.Resource{ + Resources: map[string]*fnv1.Resource{ "observed-resource-a": { Resource: MustStruct(map[string]any{ "apiVersion": "test.crossplane.io/v1", "kind": "CoolComposed", }), - Ready: v1beta1.Ready_READY_TRUE, + Ready: fnv1.Ready_READY_TRUE, }, "desired-resource-a": { Resource: MustStruct(map[string]any{ @@ -545,28 +694,52 @@ func TestFunctionCompose(t *testing.T) { }, }, }, - Results: []*v1beta1.Result{ + Results: []*fnv1.Result{ { - Severity: v1beta1.Severity_SEVERITY_NORMAL, + Severity: fnv1.Severity_SEVERITY_NORMAL, Message: "A normal result", }, { - Severity: v1beta1.Severity_SEVERITY_WARNING, + Severity: fnv1.Severity_SEVERITY_WARNING, Message: "A warning result", }, { - Severity: v1beta1.Severity_SEVERITY_UNSPECIFIED, + Severity: fnv1.Severity_SEVERITY_UNSPECIFIED, Message: "A result of unspecified severity", }, + { + Severity: fnv1.Severity_SEVERITY_NORMAL, + Reason: ptr.To("SomeReason"), + Message: "A result with all values explicitly set.", + Target: fnv1.Target_TARGET_COMPOSITE_AND_CLAIM.Enum(), + }, + }, + Conditions: []*fnv1.Condition{ + // A condition returned by the function with only the minimum + // 
necessary values. + { + Type: "DatabaseReady", + Status: fnv1.Status_STATUS_CONDITION_FALSE, + Reason: "Creating", + }, + // A condition returned by the function with all optional values + // given. + { + Type: "DeploymentReady", + Status: fnv1.Status_STATUS_CONDITION_TRUE, + Reason: "Available", + Message: ptr.To("The deployment is ready."), + Target: fnv1.Target_TARGET_COMPOSITE_AND_CLAIM.Enum(), + }, }, } return rsp, nil }), o: []FunctionComposerOption{ - WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, nil })), - WithComposedResourceObserver(ComposedResourceObserverFn(func(ctx context.Context, xr resource.Composite) (ComposedResourceStates, error) { + WithComposedResourceObserver(ComposedResourceObserverFn(func(_ context.Context, _ resource.Composite) (ComposedResourceStates, error) { // We only try to extract connection details for // observed resources. 
r := ComposedResourceStates{ @@ -578,7 +751,7 @@ func TestFunctionCompose(t *testing.T) { } return r, nil })), - WithComposedResourceGarbageCollector(ComposedResourceGarbageCollectorFn(func(ctx context.Context, owner metav1.Object, observed, desired ComposedResourceStates) error { + WithComposedResourceGarbageCollector(ComposedResourceGarbageCollectorFn(func(_ context.Context, _ metav1.Object, _, _ ComposedResourceStates) error { return nil })), }, @@ -605,6 +778,16 @@ func TestFunctionCompose(t *testing.T) { { Step: "run-cool-function", FunctionRef: v1.FunctionReference{Name: "cool-function"}, + Credentials: []v1.FunctionCredentials{ + { + Name: "cool-secret", + Source: v1.FunctionCredentialsSourceSecret, + SecretRef: &xpv1.SecretReference{ + Namespace: "default", + Name: "cool-secret", + }, + }, + }, }, }, }, @@ -614,228 +797,68 @@ func TestFunctionCompose(t *testing.T) { want: want{ res: CompositionResult{ Composed: []ComposedResource{ - {ResourceName: "desired-resource-a"}, - {ResourceName: "observed-resource-a", Ready: true}, + {ResourceName: "desired-resource-a", Synced: true}, + {ResourceName: "observed-resource-a", Ready: true, Synced: true}, }, ConnectionDetails: managed.ConnectionDetails{ "from": []byte("function-pipeline"), }, - Events: []event.Event{ + Events: []TargetedEvent{ { - Type: "Normal", - Reason: "ComposeResources", - Message: "Pipeline step \"run-cool-function\": A normal result", + Event: event.Event{ + Type: "Normal", + Reason: "ComposeResources", + Message: "A normal result", + }, + Detail: "Pipeline step \"run-cool-function\"", + Target: CompositionTargetComposite, }, { - Type: "Warning", - Reason: "ComposeResources", - Message: "Pipeline step \"run-cool-function\": A warning result", + Event: event.Event{ + Type: "Warning", + Reason: "ComposeResources", + Message: "A warning result", + }, + Detail: "Pipeline step \"run-cool-function\"", + Target: CompositionTargetComposite, }, { - Type: "Warning", - Reason: "ComposeResources", - Message: 
"Pipeline step \"run-cool-function\" returned a result of unknown severity (assuming warning): A result of unspecified severity", - }, - }, - }, - err: nil, - }, - }, - "SuccessfulWithExtraResources": { - reason: "We should return a valid CompositionResult when a 'pure Function' (i.e. patch-and-transform-less) reconcile succeeds after having requested some extra resource", - params: params{ - kube: &test.MockClient{ - MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{Resource: "UncoolComposed"}, "")), // all names are available - MockPatch: test.NewMockPatchFn(nil), - MockStatusPatch: test.NewMockSubResourcePatchFn(nil), - }, - r: func() FunctionRunner { - var nrCalls int - return FunctionRunnerFn(func(ctx context.Context, name string, req *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error) { - defer func() { nrCalls++ }() - requirements := &v1beta1.Requirements{ - ExtraResources: map[string]*v1beta1.ResourceSelector{ - "existing": { - ApiVersion: "test.crossplane.io/v1", - Kind: "Foo", - Match: &v1beta1.ResourceSelector_MatchName{ - MatchName: "existing", - }, - }, - "missing": { - ApiVersion: "test.crossplane.io/v1", - Kind: "Bar", - Match: &v1beta1.ResourceSelector_MatchName{ - MatchName: "missing", - }, - }, - }, - } - rsp := &v1beta1.RunFunctionResponse{ - Desired: &v1beta1.State{ - Composite: &v1beta1.Resource{ - Resource: MustStruct(map[string]any{ - "status": map[string]any{ - "widgets": 42, - }, - }), - ConnectionDetails: map[string][]byte{"from": []byte("function-pipeline")}, - }, - Resources: map[string]*v1beta1.Resource{ - "observed-resource-a": { - Resource: MustStruct(map[string]any{ - "apiVersion": "test.crossplane.io/v1", - "kind": "CoolComposed", - "spec": map[string]any{ - "someKey": req.GetInput().AsMap()["someKey"].(string), - }, - }), - Ready: v1beta1.Ready_READY_TRUE, - }, - "desired-resource-a": { - Resource: MustStruct(map[string]any{ - "apiVersion": "test.crossplane.io/v1", - "kind": "CoolComposed", - }), - 
}, - }, - }, - Results: []*v1beta1.Result{ - { - Severity: v1beta1.Severity_SEVERITY_NORMAL, - Message: "A normal result", - }, - { - Severity: v1beta1.Severity_SEVERITY_WARNING, - Message: "A warning result", - }, - { - Severity: v1beta1.Severity_SEVERITY_UNSPECIFIED, - Message: "A result of unspecified severity", - }, - }, - Requirements: requirements, - } - - if nrCalls > 1 { - t.Fatalf("unexpected number of calls to FunctionRunner.RunFunction, should have been exactly 2: %d", nrCalls+1) - return nil, errBoom - } - - if nrCalls == 1 { - if len(req.GetExtraResources()) != 2 { - t.Fatalf("unexpected number of extra resources: %d", len(requirements.GetExtraResources())) - } - if rs := req.GetExtraResources()["missing"]; rs != nil && len(rs.GetItems()) != 0 { - t.Fatalf("unexpected extra resource, expected none, got: %v", rs) - } - if rs := req.GetExtraResources()["existing"]; rs == nil || len(rs.GetItems()) != 1 { - t.Fatalf("unexpected extra resource, expected one, got: %v", rs) - } - } - - return rsp, nil - }) - }(), - o: []FunctionComposerOption{ - WithCompositeConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { - return nil, nil - })), - WithComposedResourceObserver(ComposedResourceObserverFn(func(ctx context.Context, xr resource.Composite) (ComposedResourceStates, error) { - // We only try to extract connection details for - // observed resources. 
- r := ComposedResourceStates{ - "observed-resource-a": ComposedResourceState{ - Resource: &fake.Composed{ - ObjectMeta: metav1.ObjectMeta{Name: "observed-resource-a"}, - }, - }, - } - return r, nil - })), - WithComposedResourceGarbageCollector(ComposedResourceGarbageCollectorFn(func(ctx context.Context, owner metav1.Object, observed, desired ComposedResourceStates) error { - return nil - })), - WithExtraResourcesFetcher(ExtraResourcesFetcherFn(func(ctx context.Context, rs *v1beta1.ResourceSelector) (*v1beta1.Resources, error) { - if rs.GetMatchName() == "existing" { - return &v1beta1.Resources{ - Items: []*v1beta1.Resource{ - { - Resource: MustStruct(map[string]any{ - "apiVersion": "test.crossplane.io/v1", - "kind": "Foo", - "metadata": map[string]any{ - "name": "existing", - }, - "spec": map[string]any{ - "someField": "someValue", - }, - }), - }, - }, - }, nil - } - return nil, nil - })), - }, - }, - args: args{ - xr: func() *composite.Unstructured { - // Our XR needs a GVK to survive round-tripping through a - // protobuf struct (which involves using the Kubernetes-aware - // JSON unmarshaller that requires a GVK). 
- xr := composite.New(composite.WithGroupVersionKind(schema.GroupVersionKind{ - Group: "test.crossplane.io", - Version: "v1", - Kind: "CoolComposite", - })) - xr.SetLabels(map[string]string{ - xcrd.LabelKeyNamePrefixForComposed: "parent-xr", - }) - return xr - }(), - req: CompositionRequest{ - Revision: &v1.CompositionRevision{ - Spec: v1.CompositionRevisionSpec{ - Pipeline: []v1.PipelineStep{ - { - Step: "run-cool-function", - FunctionRef: v1.FunctionReference{Name: "cool-function"}, - Input: &runtime.RawExtension{Raw: []byte(`{ - "apiVersion": "test.crossplane.io/v1", - "kind": "Input", - "someKey": "someValue" - }`)}, - }, + Event: event.Event{ + Type: "Warning", + Reason: "ComposeResources", + Message: "Pipeline step \"run-cool-function\" returned a result of unknown severity (assuming warning): A result of unspecified severity", }, + Target: CompositionTargetComposite, }, - }, - }, - }, - want: want{ - res: CompositionResult{ - Composed: []ComposedResource{ - {ResourceName: "desired-resource-a"}, - {ResourceName: "observed-resource-a", Ready: true}, - }, - ConnectionDetails: managed.ConnectionDetails{ - "from": []byte("function-pipeline"), - }, - Events: []event.Event{ { - Type: "Normal", - Reason: "ComposeResources", - Message: "Pipeline step \"run-cool-function\": A normal result", + Event: event.Event{ + Type: "Normal", + Reason: "SomeReason", + Message: "A result with all values explicitly set.", + }, + Detail: "Pipeline step \"run-cool-function\"", + Target: CompositionTargetCompositeAndClaim, }, + }, + Conditions: []TargetedCondition{ + // The condition with minimum values. { - Type: "Warning", - Reason: "ComposeResources", - Message: "Pipeline step \"run-cool-function\": A warning result", + Condition: xpv1.Condition{ + Type: "DatabaseReady", + Status: "False", + Reason: "Creating", + }, + Target: CompositionTargetComposite, }, + // The condition that provides all possible values. 
{ - Type: "Warning", - Reason: "ComposeResources", - Message: "Pipeline step \"run-cool-function\" returned a result of unknown severity (assuming warning): A result of unspecified severity", + Condition: xpv1.Condition{ + Type: "DeploymentReady", + Status: "True", + Reason: "Available", + Message: "The deployment is ready.", + }, + Target: CompositionTargetCompositeAndClaim, }, }, }, @@ -846,7 +869,6 @@ func TestFunctionCompose(t *testing.T) { for name, tc := range cases { t.Run(name, func(t *testing.T) { - c := NewFunctionComposer(tc.params.kube, tc.params.r, tc.params.o...) res, err := c.Compose(tc.args.ctx, tc.args.xr, tc.args.req) @@ -1019,7 +1041,7 @@ func TestGetComposedResources(t *testing.T) { return nil }), }, - f: ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + f: ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, errBoom }), }, @@ -1049,7 +1071,7 @@ func TestGetComposedResources(t *testing.T) { return nil }), }, - f: ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + f: ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return details, nil }), }, @@ -1067,17 +1089,18 @@ func TestGetComposedResources(t *testing.T) { }, }, want: want{ - ors: ComposedResourceStates{"cool-resource": ComposedResourceState{ - ConnectionDetails: details, - Resource: func() resource.Composed { - cd := composed.New() - cd.SetAPIVersion("example.org/v1") - cd.SetKind("Composed") - cd.SetName("cool-resource-42") - SetCompositionResourceName(cd, "cool-resource") - return cd - }(), - }, + ors: ComposedResourceStates{ + "cool-resource": ComposedResourceState{ + ConnectionDetails: details, + Resource: func() resource.Composed { + cd := composed.New() + 
cd.SetAPIVersion("example.org/v1") + cd.SetKind("Composed") + cd.SetName("cool-resource-42") + SetCompositionResourceName(cd, "cool-resource") + return cd + }(), + }, }, }, }, @@ -1085,7 +1108,6 @@ func TestGetComposedResources(t *testing.T) { for name, tc := range cases { t.Run(name, func(t *testing.T) { - g := NewExistingComposedResourceObserver(tc.params.c, tc.params.f) ors, err := g.ObserveComposedResources(tc.args.ctx, tc.args.xr) @@ -1107,7 +1129,7 @@ func TestAsState(t *testing.T) { rs ComposedResourceStates } type want struct { - d *v1beta1.State + d *fnv1.State err error } @@ -1136,15 +1158,15 @@ func TestAsState(t *testing.T) { }, }, want: want{ - d: &v1beta1.State{ - Composite: &v1beta1.Resource{ + d: &fnv1.State{ + Composite: &fnv1.Resource{ Resource: &structpb.Struct{Fields: map[string]*structpb.Value{ "apiVersion": structpb.NewStringValue("example.org/v1"), "kind": structpb.NewStringValue("Composite"), }}, ConnectionDetails: map[string][]byte{"a": []byte("b")}, }, - Resources: map[string]*v1beta1.Resource{ + Resources: map[string]*fnv1.Resource{ "cool-resource": { Resource: &structpb.Struct{Fields: map[string]*structpb.Value{ "apiVersion": structpb.NewStringValue("example.org/v2"), @@ -1214,11 +1236,21 @@ func TestGarbageCollectComposedResources(t *testing.T) { }, }, observed: ComposedResourceStates{ - "undesired-resource": ComposedResourceState{Resource: &fake.Composed{}}, + "undesired-resource": ComposedResourceState{Resource: &fake.Composed{ + ObjectMeta: metav1.ObjectMeta{ + // This resource isn't controlled by the XR. 
+ OwnerReferences: []metav1.OwnerReference{{ + Controller: ptr.To(true), + UID: "a-different-xr", + Kind: "XR", + Name: "different", + }}, + }, + }}, }, }, want: want{ - err: nil, + err: errors.New(`refusing to delete composed resource "undesired-resource" that is controlled by XR "different"`), }, }, "DeleteError": { @@ -1323,7 +1355,6 @@ func TestGarbageCollectComposedResources(t *testing.T) { for name, tc := range cases { t.Run(name, func(t *testing.T) { - d := NewDeletingComposedResourceGarbageCollector(tc.params.client) err := d.GarbageCollectComposedResources(tc.args.ctx, tc.args.owner, tc.args.observed, tc.args.desired) @@ -1393,216 +1424,11 @@ func TestUpdateResourceRefs(t *testing.T) { for name, tc := range cases { t.Run(name, func(t *testing.T) { - UpdateResourceRefs(tc.args.xr, tc.args.drs) if diff := cmp.Diff(tc.want.xr, tc.args.xr); diff != "" { t.Errorf("\n%s\nUpdateResourceRefs(...): -want, +got:\n%s", tc.reason, diff) } - - }) - } -} - -func TestExistingExtraResourcesFetcherFetch(t *testing.T) { - errBoom := errors.New("boom") - - type args struct { - rs *v1beta1.ResourceSelector - c client.Reader - } - type want struct { - res *v1beta1.Resources - err error - } - cases := map[string]struct { - reason string - args args - want want - }{ - "SuccessMatchName": { - reason: "We should return a valid Resources when a resource is found by name", - args: args{ - rs: &v1beta1.ResourceSelector{ - ApiVersion: "test.crossplane.io/v1", - Kind: "Foo", - Match: &v1beta1.ResourceSelector_MatchName{ - MatchName: "cool-resource", - }, - }, - c: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - obj.SetName("cool-resource") - return nil - }), - }, - }, - want: want{ - res: &v1beta1.Resources{ - Items: []*v1beta1.Resource{ - { - Resource: MustStruct(map[string]any{ - "apiVersion": "test.crossplane.io/v1", - "kind": "Foo", - "metadata": map[string]any{ - "name": "cool-resource", - }, - }), - }, - }, - }, - }, - }, - 
"SuccessMatchLabels": { - reason: "We should return a valid Resources when a resource is found by labels", - args: args{ - rs: &v1beta1.ResourceSelector{ - ApiVersion: "test.crossplane.io/v1", - Kind: "Foo", - Match: &v1beta1.ResourceSelector_MatchLabels{ - MatchLabels: &v1beta1.MatchLabels{ - Labels: map[string]string{ - "cool": "resource", - }, - }, - }, - }, - c: &test.MockClient{ - MockList: test.NewMockListFn(nil, func(obj client.ObjectList) error { - obj.(*kunstructured.UnstructuredList).Items = []kunstructured.Unstructured{ - { - Object: map[string]interface{}{ - "apiVersion": "test.crossplane.io/v1", - "kind": "Foo", - "metadata": map[string]interface{}{ - "name": "cool-resource", - "labels": map[string]interface{}{ - "cool": "resource", - }, - }, - }, - }, - { - Object: map[string]interface{}{ - "apiVersion": "test.crossplane.io/v1", - "kind": "Foo", - "metadata": map[string]interface{}{ - "name": "cooler-resource", - "labels": map[string]interface{}{ - "cool": "resource", - }, - }, - }, - }, - } - return nil - }), - }, - }, - want: want{ - res: &v1beta1.Resources{ - Items: []*v1beta1.Resource{ - { - Resource: MustStruct(map[string]any{ - "apiVersion": "test.crossplane.io/v1", - "kind": "Foo", - "metadata": map[string]any{ - "name": "cool-resource", - "labels": map[string]any{ - "cool": "resource", - }, - }, - }), - }, - { - Resource: MustStruct(map[string]any{ - "apiVersion": "test.crossplane.io/v1", - "kind": "Foo", - "metadata": map[string]any{ - "name": "cooler-resource", - "labels": map[string]any{ - "cool": "resource", - }, - }, - }), - }, - }, - }, - }, - }, - "NotFoundMatchName": { - reason: "We should return no error when a resource is not found by name", - args: args{ - rs: &v1beta1.ResourceSelector{ - ApiVersion: "test.crossplane.io/v1", - Kind: "Foo", - Match: &v1beta1.ResourceSelector_MatchName{ - MatchName: "cool-resource", - }, - }, - c: &test.MockClient{ - MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{Resource: "Foo"}, 
"cool-resource")), - }, - }, - want: want{ - res: nil, - err: nil, - }, - }, - // NOTE(phisco): No NotFound error is returned when listing resources by labels, so there is no NotFoundMatchLabels test case. - "ErrorMatchName": { - reason: "We should return any other error encountered when getting a resource by name", - args: args{ - rs: &v1beta1.ResourceSelector{ - ApiVersion: "test.crossplane.io/v1", - Kind: "Foo", - Match: &v1beta1.ResourceSelector_MatchName{ - MatchName: "cool-resource", - }, - }, - c: &test.MockClient{ - MockGet: test.NewMockGetFn(errBoom), - }, - }, - want: want{ - res: nil, - err: errBoom, - }, - }, - "ErrorMatchLabels": { - reason: "We should return any other error encountered when listing resources by labels", - args: args{ - rs: &v1beta1.ResourceSelector{ - ApiVersion: "test.crossplane.io/v1", - Kind: "Foo", - Match: &v1beta1.ResourceSelector_MatchLabels{ - MatchLabels: &v1beta1.MatchLabels{ - Labels: map[string]string{ - "cool": "resource", - }, - }, - }, - }, - c: &test.MockClient{ - MockList: test.NewMockListFn(errBoom), - }, - }, - want: want{ - res: nil, - err: errBoom, - }, - }, - } - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - g := NewExistingExtraResourcesFetcher(tc.args.c) - res, err := g.Fetch(context.Background(), tc.args.rs) - if diff := cmp.Diff(tc.want.err, err, cmpopts.EquateErrors()); diff != "" { - t.Errorf("\n%s\nGet(...): -want, +got:\n%s", tc.reason, diff) - } - if diff := cmp.Diff(tc.want.res, res, cmpopts.IgnoreUnexported(v1beta1.Resources{}, v1beta1.Resource{}, structpb.Struct{}, structpb.Value{})); diff != "" { - t.Errorf("\n%s\nGet(...): -want, +got:\n%s", tc.reason, diff) - } }) } } diff --git a/internal/controller/apiextensions/composite/composition_patches.go b/internal/controller/apiextensions/composite/composition_patches.go index 568d2b967..85d4e8512 100644 --- a/internal/controller/apiextensions/composite/composition_patches.go +++ 
b/internal/controller/apiextensions/composite/composition_patches.go @@ -95,7 +95,7 @@ func ApplyToObjects(p v1.Patch, cp, cd runtime.Object, only ...v1.PatchType) err return errors.Errorf(errFmtInvalidPatchType, p.Type) } -// filterPatch returns true if patch should be filtered (not applied) +// filterPatch returns true if patch should be filtered (not applied). func filterPatch(p v1.Patch, only ...v1.PatchType) bool { // filter does not apply if not set if len(only) == 0 { diff --git a/internal/controller/apiextensions/composite/composition_patches_test.go b/internal/controller/apiextensions/composite/composition_patches_test.go index ef470ad6b..b4cf87f78 100644 --- a/internal/controller/apiextensions/composite/composition_patches_test.go +++ b/internal/controller/apiextensions/composite/composition_patches_test.go @@ -460,7 +460,8 @@ func TestPatchApply(t *testing.T) { Name: "cd", Labels: map[string]string{ "Test": "blah", - }}, + }, + }, }, err: nil, }, @@ -502,7 +503,8 @@ func TestPatchApply(t *testing.T) { Name: "cd", Labels: map[string]string{ "Test": "blah", - }}, + }, + }, }, err: nil, }, @@ -545,7 +547,8 @@ func TestPatchApply(t *testing.T) { Name: "cd", Labels: map[string]string{ "Test": "blah", - }}, + }, + }, }, err: nil, }, @@ -635,7 +638,8 @@ func TestPatchApply(t *testing.T) { Name: "cd", Labels: map[string]string{ "Test": "blah", - }}, + }, + }, }, err: nil, }, @@ -682,7 +686,8 @@ func TestPatchApply(t *testing.T) { Name: "cd", Labels: map[string]string{ "Test": "blah", - }}, + }, + }, }, err: errors.Errorf(errFmtRequiredField, "Combine", v1.PatchTypeCombineFromComposite), }, @@ -729,7 +734,8 @@ func TestPatchApply(t *testing.T) { Name: "cd", Labels: map[string]string{ "Test": "blah", - }}, + }, + }, }, err: errors.Errorf(errFmtRequiredField, "Combine", v1.PatchTypeCombineFromEnvironment), }, @@ -783,7 +789,8 @@ func TestPatchApply(t *testing.T) { Name: "cd", Labels: map[string]string{ "Test": "blah", - }}, + }, + }, }, err: 
errors.Errorf(errFmtCombineConfigMissing, v1.CombineStrategyString), }, @@ -835,7 +842,8 @@ func TestPatchApply(t *testing.T) { Name: "cd", Labels: map[string]string{ "Test": "blah", - }}, + }, + }, }, err: errors.New(errCombineRequiresVariables), }, @@ -894,7 +902,8 @@ func TestPatchApply(t *testing.T) { Name: "cd", Labels: map[string]string{ "Test": "blah", - }}, + }, + }, }, err: nil, }, @@ -950,7 +959,8 @@ func TestPatchApply(t *testing.T) { Labels: map[string]string{ "Test": "blah", "destination": "foo-bar", - }}, + }, + }, }, err: nil, }, @@ -1006,7 +1016,8 @@ func TestPatchApply(t *testing.T) { Labels: map[string]string{ "source1": "foo", "source2": "bar", - }}, + }, + }, }, err: nil, }, @@ -1062,7 +1073,8 @@ func TestPatchApply(t *testing.T) { Labels: map[string]string{ "source1": "foo", "source2": "bar", - }}, + }, + }, }, err: nil, }, diff --git a/internal/controller/apiextensions/composite/composition_pt.go b/internal/controller/apiextensions/composite/composition_pt.go index cc8b1eedb..394381baa 100644 --- a/internal/controller/apiextensions/composite/composition_pt.go +++ b/internal/controller/apiextensions/composite/composition_pt.go @@ -32,7 +32,6 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/meta" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" - "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured" "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composed" "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" @@ -41,14 +40,14 @@ import ( "github.com/crossplane/crossplane/internal/names" ) -// Error strings +// Error strings. 
const ( - errGetComposed = "cannot get composed resource" - errGCComposed = "cannot garbage collect composed resource" - errApplyComposed = "cannot apply composed resource" - errFetchDetails = "cannot fetch connection details" - errInline = "cannot inline Composition patch sets" + errGetComposed = "cannot get composed resource" + errGCComposed = "cannot garbage collect composed resource" + errFetchDetails = "cannot fetch connection details" + errInline = "cannot inline Composition patch sets" + errFmtApplyComposed = "cannot apply composed resource %q" errFmtPatchEnvironment = "cannot apply environment patch at index %d" errFmtParseBase = "cannot parse base template of composed resource %q" errFmtRenderFromCompositePatches = "cannot render FromComposite or environment patches for composed resource %q" @@ -126,10 +125,6 @@ type PTComposer struct { // NewPTComposer returns a Composer that composes resources using Patch and // Transform (P&T) Composition - a Composition's bases, patches, and transforms. func NewPTComposer(kube client.Client, o ...PTComposerOption) *PTComposer { - // TODO(negz): Can we avoid double-wrapping if the supplied client is - // already wrapped? Or just do away with unstructured.NewClient completely? - kube = unstructured.NewClient(kube) - c := &PTComposer{ client: resource.ClientApplicator{Client: kube, Applicator: resource.NewAPIPatchingApplicator(kube)}, @@ -159,7 +154,7 @@ func NewPTComposer(kube client.Client, o ...PTComposerOption) *PTComposer { // 3. Apply all composed resources that rendered successfully. // 4. Observe the readiness and connection details of all composed resources // that rendered successfully. -func (c *PTComposer) Compose(ctx context.Context, xr *composite.Unstructured, req CompositionRequest) (CompositionResult, error) { //nolint:gocyclo // Breaking this up doesn't seem worth yet more layers of abstraction. 
+func (c *PTComposer) Compose(ctx context.Context, xr *composite.Unstructured, req CompositionRequest) (CompositionResult, error) { //nolint:gocognit // Breaking this up doesn't seem worth yet more layers of abstraction. // Inline PatchSets before composing resources. ct, err := ComposedTemplates(req.Revision.Spec.PatchSets, req.Revision.Spec.Resources) if err != nil { @@ -188,7 +183,7 @@ func (c *PTComposer) Compose(ctx context.Context, xr *composite.Unstructured, re } } - events := make([]event.Event, 0) + events := make([]TargetedEvent, 0) // We optimistically render all composed resources that we are able to with // the expectation that any that we fail to render will subsequently have @@ -218,17 +213,26 @@ func (c *PTComposer) Compose(ctx context.Context, xr *composite.Unstructured, re rendered := true if err := RenderFromCompositeAndEnvironmentPatches(r, xr, req.Environment, ta.Template.Patches); err != nil { - events = append(events, event.Warning(reasonCompose, errors.Wrapf(err, errFmtRenderFromCompositePatches, name))) + events = append(events, TargetedEvent{ + Event: event.Warning(reasonCompose, errors.Wrapf(err, errFmtRenderFromCompositePatches, name)), + Target: CompositionTargetComposite, + }) rendered = false } if err := RenderComposedResourceMetadata(r, xr, ResourceName(ptr.Deref(ta.Template.Name, ""))); err != nil { - events = append(events, event.Warning(reasonCompose, errors.Wrapf(err, errFmtRenderMetadata, name))) + events = append(events, TargetedEvent{ + Event: event.Warning(reasonCompose, errors.Wrapf(err, errFmtRenderMetadata, name)), + Target: CompositionTargetComposite, + }) rendered = false } if err := c.composed.GenerateName(ctx, r); err != nil { - events = append(events, event.Warning(reasonCompose, errors.Wrapf(err, errFmtGenerateName, name))) + events = append(events, TargetedEvent{ + Event: event.Warning(reasonCompose, errors.Wrapf(err, errFmtGenerateName, name)), + Target: CompositionTargetComposite, + }) rendered = false } @@ 
-273,10 +277,28 @@ func (c *PTComposer) Compose(ctx context.Context, xr *composite.Unstructured, re o := []resource.ApplyOption{resource.MustBeControllableBy(xr.GetUID()), usage.RespectOwnerRefs()} o = append(o, mergeOptions(filterPatches(t.Patches, patchTypesFromXR()...))...) if err := c.client.Apply(ctx, cd, o...); err != nil { + if kerrors.IsInvalid(err) { + // We tried applying an invalid resource, we can't tell whether + // this means the resource will never be valid or it will if we + // run again the composition after some other resource is + // created or updated successfully. So, we emit a warning event + // and move on. + events = append(events, TargetedEvent{ + Event: event.Warning(reasonCompose, errors.Wrapf(err, errFmtApplyComposed, ptr.Deref(t.Name, fmt.Sprintf("%d", i+1)))), + Target: CompositionTargetComposite, + }) + // We unset the cd here so that we don't try to observe it + // later. This will also mean we report it as not ready and not + // synced. Resulting in the XR being reported as not ready nor + // synced too. + cds[i] = nil + continue + } + // TODO(negz): Include the template name (if any) in this error. // Including the rendered resource's kind may help too (e.g. if the // template is anonymous). - return CompositionResult{}, errors.Wrap(err, errApplyComposed) + return CompositionResult{}, errors.Wrapf(err, errFmtApplyComposed, ptr.Deref(t.Name, fmt.Sprintf("%d", i+1))) } } @@ -298,7 +320,7 @@ func (c *PTComposer) Compose(ctx context.Context, xr *composite.Unstructured, re // to observe it. We still want to return it to the Reconciler so that // it knows that this desired composed resource is not ready. 
if cd == nil { - resources[i] = ComposedResource{ResourceName: name, Ready: false} + resources[i] = ComposedResource{ResourceName: name, Synced: false, Ready: false} continue } @@ -328,7 +350,7 @@ func (c *PTComposer) Compose(ctx context.Context, xr *composite.Unstructured, re return CompositionResult{}, errors.Wrapf(err, errFmtCheckReadiness, name) } - resources[i] = ComposedResource{ResourceName: name, Ready: ready} + resources[i] = ComposedResource{ResourceName: name, Ready: ready, Synced: true} } // Call Apply so that we do not just replace fields on existing XR but @@ -344,7 +366,7 @@ func (c *PTComposer) Compose(ctx context.Context, xr *composite.Unstructured, re // be rejected by the API server. This will trigger an immediate requeue, // and we'll proceed to update the status as soon as there are no changes to // be made to the spec. - objCopy := xr.DeepCopyObject().(client.Object) + objCopy := xr.DeepCopy() if err := c.client.Apply(ctx, objCopy, mergeOptions(toXRPatchesFromTAs(tas))...); err != nil { return CompositionResult{}, errors.Wrap(err, errUpdate) } @@ -354,7 +376,7 @@ func (c *PTComposer) Compose(ctx context.Context, xr *composite.Unstructured, re // toXRPatchesFromTAs selects patches defined in composed templates, // whose type is one of the XR-targeting patches -// (e.g. v1.PatchTypeToCompositeFieldPath or v1.PatchTypeCombineToComposite) +// (e.g. v1.PatchTypeToCompositeFieldPath or v1.PatchTypeCombineToComposite). func toXRPatchesFromTAs(tas []TemplateAssociation) []v1.Patch { filtered := make([]v1.Patch, 0, len(tas)) for _, ta := range tas { @@ -364,7 +386,7 @@ func toXRPatchesFromTAs(tas []TemplateAssociation) []v1.Patch { return filtered } -// filterPatches selects patches whose type belong to the list onlyTypes +// filterPatches selects patches whose type belong to the list onlyTypes. 
func filterPatches(pas []v1.Patch, onlyTypes ...v1.PatchType) []v1.Patch { filtered := make([]v1.Patch, 0, len(pas)) include := make(map[v1.PatchType]bool) @@ -401,7 +423,7 @@ func AssociateByOrder(t []v1.ComposedTemplate, r []corev1.ObjectReference) []Tem j = len(r) } - for i := 0; i < j; i++ { + for i := range j { a[i].Reference = r[i] } @@ -410,7 +432,7 @@ func AssociateByOrder(t []v1.ComposedTemplate, r []corev1.ObjectReference) []Tem // A CompositionTemplateAssociator returns an array of template associations. type CompositionTemplateAssociator interface { - AssociateTemplates(context.Context, resource.Composite, []v1.ComposedTemplate) ([]TemplateAssociation, error) + AssociateTemplates(ctx context.Context, xr resource.Composite, cts []v1.ComposedTemplate) ([]TemplateAssociation, error) } // A CompositionTemplateAssociatorFn returns an array of template associations. @@ -439,7 +461,7 @@ func NewGarbageCollectingAssociator(c client.Client) *GarbageCollectingAssociato } // AssociateTemplates with composed resources. -func (a *GarbageCollectingAssociator) AssociateTemplates(ctx context.Context, cr resource.Composite, ct []v1.ComposedTemplate) ([]TemplateAssociation, error) { //nolint:gocyclo // Only slightly over (13). +func (a *GarbageCollectingAssociator) AssociateTemplates(ctx context.Context, cr resource.Composite, ct []v1.ComposedTemplate) ([]TemplateAssociation, error) { templates := map[ResourceName]int{} for i, t := range ct { if t.Name == nil { @@ -494,9 +516,17 @@ func (a *GarbageCollectingAssociator) AssociateTemplates(ctx context.Context, cr continue } - // We want to garbage collect this resource, but we don't control it. - if c := metav1.GetControllerOf(cd); c == nil || c.UID != cr.GetUID() { - continue + // Don't garbage collect composed resources that someone else controls. + // + // We do garbage collect composed resources that no-one controls. If a + // composed resource appears in observed (i.e. 
appears in the XR's + // spec.resourceRefs) but doesn't have a controller ref, most likely we + // created it but its controller ref was stripped. In this situation it + // would be permissible for us to adopt the composed resource by setting + // our XR as the controller ref, then delete it. So we may as well just + // go straight to deleting it. + if c := metav1.GetControllerOf(cd); c != nil && c.UID != cr.GetUID() { + return nil, errors.Errorf(errFmtControllerMismatch, name, c.Kind, c.Name) } // This existing resource does not correspond to an extant template. It diff --git a/internal/controller/apiextensions/composite/composition_pt_test.go b/internal/controller/apiextensions/composite/composition_pt_test.go index a8147db1a..04c6f8440 100644 --- a/internal/controller/apiextensions/composite/composition_pt_test.go +++ b/internal/controller/apiextensions/composite/composition_pt_test.go @@ -94,7 +94,7 @@ func TestPTCompose(t *testing.T) { reason: "We should return any error encountered while associating Composition templates with composed resources.", params: params{ o: []PTComposerOption{ - WithTemplateAssociator(CompositionTemplateAssociatorFn(func(ctx context.Context, c resource.Composite, ct []v1.ComposedTemplate) ([]TemplateAssociation, error) { + WithTemplateAssociator(CompositionTemplateAssociatorFn(func(_ context.Context, _ resource.Composite, _ []v1.ComposedTemplate) ([]TemplateAssociation, error) { return nil, errBoom })), }, @@ -112,7 +112,7 @@ func TestPTCompose(t *testing.T) { reason: "We should return any error encountered while parsing a composed resource base template", params: params{ o: []PTComposerOption{ - WithTemplateAssociator(CompositionTemplateAssociatorFn(func(ctx context.Context, c resource.Composite, ct []v1.ComposedTemplate) ([]TemplateAssociation, error) { + WithTemplateAssociator(CompositionTemplateAssociatorFn(func(_ context.Context, _ resource.Composite, _ []v1.ComposedTemplate) ([]TemplateAssociation, error) { tas := 
[]TemplateAssociation{ { Template: v1.ComposedTemplate{ @@ -123,14 +123,14 @@ func TestPTCompose(t *testing.T) { } return tas, nil })), - WithComposedNameGenerator(names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { return nil })), - WithComposedConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithComposedNameGenerator(names.NameGeneratorFn(func(_ context.Context, _ resource.Object) error { return nil })), + WithComposedConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, nil })), - WithComposedConnectionDetailsExtractor(ConnectionDetailsExtractorFn(func(cd resource.Composed, conn managed.ConnectionDetails, cfg ...ConnectionDetailExtractConfig) (managed.ConnectionDetails, error) { + WithComposedConnectionDetailsExtractor(ConnectionDetailsExtractorFn(func(_ resource.Composed, _ managed.ConnectionDetails, _ ...ConnectionDetailExtractConfig) (managed.ConnectionDetails, error) { return details, nil })), - WithComposedReadinessChecker(ReadinessCheckerFn(func(ctx context.Context, o ConditionedObject, rc ...ReadinessCheck) (ready bool, err error) { + WithComposedReadinessChecker(ReadinessCheckerFn(func(_ context.Context, _ ConditionedObject, _ ...ReadinessCheck) (ready bool, err error) { return true, nil })), }, @@ -152,7 +152,7 @@ func TestPTCompose(t *testing.T) { MockUpdate: test.NewMockUpdateFn(errBoom), }, o: []PTComposerOption{ - WithTemplateAssociator(CompositionTemplateAssociatorFn(func(ctx context.Context, c resource.Composite, ct []v1.ComposedTemplate) ([]TemplateAssociation, error) { + WithTemplateAssociator(CompositionTemplateAssociatorFn(func(_ context.Context, _ resource.Composite, _ []v1.ComposedTemplate) ([]TemplateAssociation, error) { tas := []TemplateAssociation{{ Template: v1.ComposedTemplate{ Name: ptr.To("cool-resource"), 
@@ -161,7 +161,7 @@ func TestPTCompose(t *testing.T) { }} return tas, nil })), - WithComposedNameGenerator(names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { return nil })), + WithComposedNameGenerator(names.NameGeneratorFn(func(_ context.Context, _ resource.Object) error { return nil })), }, }, args: args{ @@ -184,7 +184,7 @@ func TestPTCompose(t *testing.T) { MockCreate: test.NewMockCreateFn(errBoom), }, o: []PTComposerOption{ - WithTemplateAssociator(CompositionTemplateAssociatorFn(func(ctx context.Context, c resource.Composite, ct []v1.ComposedTemplate) ([]TemplateAssociation, error) { + WithTemplateAssociator(CompositionTemplateAssociatorFn(func(_ context.Context, _ resource.Composite, _ []v1.ComposedTemplate) ([]TemplateAssociation, error) { tas := []TemplateAssociation{{ Template: v1.ComposedTemplate{ Name: ptr.To("cool-resource"), @@ -193,7 +193,7 @@ func TestPTCompose(t *testing.T) { }} return tas, nil })), - WithComposedNameGenerator(names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { return nil })), + WithComposedNameGenerator(names.NameGeneratorFn(func(_ context.Context, _ resource.Object) error { return nil })), }, }, args: args{ @@ -203,7 +203,7 @@ func TestPTCompose(t *testing.T) { }, }, want: want{ - err: errors.Wrap(errors.Wrap(errBoom, "cannot create object"), errApplyComposed), + err: errors.Wrapf(errors.Wrap(errBoom, "cannot create object"), errFmtApplyComposed, "cool-resource"), }, }, "FetchConnectionDetailsError": { @@ -216,7 +216,7 @@ func TestPTCompose(t *testing.T) { MockCreate: test.NewMockCreateFn(nil), }, o: []PTComposerOption{ - WithTemplateAssociator(CompositionTemplateAssociatorFn(func(ctx context.Context, c resource.Composite, ct []v1.ComposedTemplate) ([]TemplateAssociation, error) { + WithTemplateAssociator(CompositionTemplateAssociatorFn(func(_ context.Context, _ resource.Composite, _ []v1.ComposedTemplate) ([]TemplateAssociation, error) { tas := []TemplateAssociation{{ Template: 
v1.ComposedTemplate{ Name: ptr.To("cool-resource"), @@ -225,8 +225,8 @@ func TestPTCompose(t *testing.T) { }} return tas, nil })), - WithComposedNameGenerator(names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { return nil })), - WithComposedConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithComposedNameGenerator(names.NameGeneratorFn(func(_ context.Context, _ resource.Object) error { return nil })), + WithComposedConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, errBoom })), }, @@ -251,7 +251,7 @@ func TestPTCompose(t *testing.T) { MockCreate: test.NewMockCreateFn(nil), }, o: []PTComposerOption{ - WithTemplateAssociator(CompositionTemplateAssociatorFn(func(ctx context.Context, c resource.Composite, ct []v1.ComposedTemplate) ([]TemplateAssociation, error) { + WithTemplateAssociator(CompositionTemplateAssociatorFn(func(_ context.Context, _ resource.Composite, _ []v1.ComposedTemplate) ([]TemplateAssociation, error) { tas := []TemplateAssociation{{ Template: v1.ComposedTemplate{ Name: ptr.To("cool-resource"), @@ -260,11 +260,11 @@ func TestPTCompose(t *testing.T) { }} return tas, nil })), - WithComposedNameGenerator(names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { return nil })), - WithComposedConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithComposedNameGenerator(names.NameGeneratorFn(func(_ context.Context, _ resource.Object) error { return nil })), + WithComposedConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, nil })), - WithComposedConnectionDetailsExtractor(ConnectionDetailsExtractorFn(func(cd 
resource.Composed, conn managed.ConnectionDetails, cfg ...ConnectionDetailExtractConfig) (managed.ConnectionDetails, error) { + WithComposedConnectionDetailsExtractor(ConnectionDetailsExtractorFn(func(_ resource.Composed, _ managed.ConnectionDetails, _ ...ConnectionDetailExtractConfig) (managed.ConnectionDetails, error) { return nil, errBoom })), }, @@ -289,7 +289,7 @@ func TestPTCompose(t *testing.T) { MockCreate: test.NewMockCreateFn(nil), }, o: []PTComposerOption{ - WithTemplateAssociator(CompositionTemplateAssociatorFn(func(ctx context.Context, c resource.Composite, ct []v1.ComposedTemplate) ([]TemplateAssociation, error) { + WithTemplateAssociator(CompositionTemplateAssociatorFn(func(_ context.Context, _ resource.Composite, _ []v1.ComposedTemplate) ([]TemplateAssociation, error) { tas := []TemplateAssociation{{ Template: v1.ComposedTemplate{ Name: ptr.To("cool-resource"), @@ -298,14 +298,14 @@ func TestPTCompose(t *testing.T) { }} return tas, nil })), - WithComposedNameGenerator(names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { return nil })), - WithComposedConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithComposedNameGenerator(names.NameGeneratorFn(func(_ context.Context, _ resource.Object) error { return nil })), + WithComposedConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, nil })), - WithComposedConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, cd resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithComposedConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, nil })), - WithComposedReadinessChecker(ReadinessCheckerFn(func(ctx context.Context, o ConditionedObject, rc 
...ReadinessCheck) (ready bool, err error) { + WithComposedReadinessChecker(ReadinessCheckerFn(func(_ context.Context, _ ConditionedObject, _ ...ReadinessCheck) (ready bool, err error) { return false, errBoom })), }, @@ -333,7 +333,7 @@ func TestPTCompose(t *testing.T) { MockPatch: test.NewMockPatchFn(nil), }, o: []PTComposerOption{ - WithTemplateAssociator(CompositionTemplateAssociatorFn(func(ctx context.Context, c resource.Composite, ct []v1.ComposedTemplate) ([]TemplateAssociation, error) { + WithTemplateAssociator(CompositionTemplateAssociatorFn(func(_ context.Context, _ resource.Composite, _ []v1.ComposedTemplate) ([]TemplateAssociation, error) { return nil, nil })), }, @@ -360,7 +360,7 @@ func TestPTCompose(t *testing.T) { MockPatch: test.NewMockPatchFn(nil), }, o: []PTComposerOption{ - WithTemplateAssociator(CompositionTemplateAssociatorFn(func(ctx context.Context, c resource.Composite, ct []v1.ComposedTemplate) ([]TemplateAssociation, error) { + WithTemplateAssociator(CompositionTemplateAssociatorFn(func(_ context.Context, _ resource.Composite, _ []v1.ComposedTemplate) ([]TemplateAssociation, error) { tas := []TemplateAssociation{{ Template: v1.ComposedTemplate{ Name: ptr.To("cool-resource"), @@ -369,14 +369,14 @@ func TestPTCompose(t *testing.T) { }} return tas, nil })), - WithComposedNameGenerator(names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { return nil })), - WithComposedConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithComposedNameGenerator(names.NameGeneratorFn(func(_ context.Context, _ resource.Object) error { return nil })), + WithComposedConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, nil })), - WithComposedConnectionDetailsExtractor(ConnectionDetailsExtractorFn(func(cd resource.Composed, conn 
managed.ConnectionDetails, cfg ...ConnectionDetailExtractConfig) (managed.ConnectionDetails, error) { + WithComposedConnectionDetailsExtractor(ConnectionDetailsExtractorFn(func(_ resource.Composed, _ managed.ConnectionDetails, _ ...ConnectionDetailExtractConfig) (managed.ConnectionDetails, error) { return details, nil })), - WithComposedReadinessChecker(ReadinessCheckerFn(func(ctx context.Context, o ConditionedObject, rc ...ReadinessCheck) (ready bool, err error) { + WithComposedReadinessChecker(ReadinessCheckerFn(func(_ context.Context, _ ConditionedObject, _ ...ReadinessCheck) (ready bool, err error) { return true, nil })), }, @@ -392,6 +392,7 @@ func TestPTCompose(t *testing.T) { Composed: []ComposedResource{{ ResourceName: "cool-resource", Ready: true, + Synced: true, }}, ConnectionDetails: details, }, @@ -409,7 +410,7 @@ func TestPTCompose(t *testing.T) { MockPatch: test.NewMockPatchFn(nil), }, o: []PTComposerOption{ - WithTemplateAssociator(CompositionTemplateAssociatorFn(func(ctx context.Context, c resource.Composite, ct []v1.ComposedTemplate) ([]TemplateAssociation, error) { + WithTemplateAssociator(CompositionTemplateAssociatorFn(func(_ context.Context, _ resource.Composite, _ []v1.ComposedTemplate) ([]TemplateAssociation, error) { tas := []TemplateAssociation{ { Template: v1.ComposedTemplate{ @@ -428,19 +429,19 @@ func TestPTCompose(t *testing.T) { } return tas, nil })), - WithComposedNameGenerator(names.NameGeneratorFn(func(ctx context.Context, cd resource.Object) error { + WithComposedNameGenerator(names.NameGeneratorFn(func(_ context.Context, cd resource.Object) error { if cd.GetObjectKind().GroupVersionKind().Kind == "BrokenResource" { return errBoom } return nil })), - WithComposedConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + WithComposedConnectionDetailsFetcher(ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) 
(managed.ConnectionDetails, error) { return nil, nil })), - WithComposedConnectionDetailsExtractor(ConnectionDetailsExtractorFn(func(cd resource.Composed, conn managed.ConnectionDetails, cfg ...ConnectionDetailExtractConfig) (managed.ConnectionDetails, error) { + WithComposedConnectionDetailsExtractor(ConnectionDetailsExtractorFn(func(_ resource.Composed, _ managed.ConnectionDetails, _ ...ConnectionDetailExtractConfig) (managed.ConnectionDetails, error) { return details, nil })), - WithComposedReadinessChecker(ReadinessCheckerFn(func(ctx context.Context, o ConditionedObject, rc ...ReadinessCheck) (ready bool, err error) { + WithComposedReadinessChecker(ReadinessCheckerFn(func(_ context.Context, _ ConditionedObject, _ ...ReadinessCheck) (ready bool, err error) { return true, nil })), }, @@ -457,15 +458,20 @@ func TestPTCompose(t *testing.T) { { ResourceName: "cool-resource", Ready: true, + Synced: true, }, { ResourceName: "uncool-resource", Ready: false, + Synced: false, }, }, ConnectionDetails: details, - Events: []event.Event{ - event.Warning(reasonCompose, errors.Wrapf(errBoom, errFmtGenerateName, "uncool-resource")), + Events: []TargetedEvent{ + { + Event: event.Warning(reasonCompose, errors.Wrapf(errBoom, errFmtGenerateName, "uncool-resource")), + Target: CompositionTargetComposite, + }, }, }, }, @@ -474,7 +480,6 @@ func TestPTCompose(t *testing.T) { for name, tc := range cases { t.Run(name, func(t *testing.T) { - c := NewPTComposer(tc.params.kube, tc.params.o...) 
res, err := c.Compose(tc.args.ctx, tc.args.xr, tc.args.req) @@ -485,7 +490,6 @@ func TestPTCompose(t *testing.T) { if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { t.Errorf("\n%s\nCompose(...): -want error, +got error:\n%s", tc.reason, diff) } - }) } } @@ -657,6 +661,8 @@ func TestGarbageCollectingAssociator(t *testing.T) { Controller: &ctrl, BlockOwnerDeletion: &ctrl, UID: types.UID("who-dat"), + Kind: "XR", + Name: "different", }}) return nil }), @@ -669,11 +675,11 @@ func TestGarbageCollectingAssociator(t *testing.T) { ct: []v1.ComposedTemplate{t0}, }, want: want{ - tas: []TemplateAssociation{{Template: t0}}, + err: errors.New(`refusing to delete composed resource "unknown" that is controlled by XR "different"`), }, }, "ResourceNotControlled": { - reason: "We should not garbage collect a resource that has no controller reference.", + reason: "We should garbage collect a resource that has no controller reference.", c: &test.MockClient{ MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { // The template used to create this resource is no longer known to us. @@ -682,6 +688,7 @@ func TestGarbageCollectingAssociator(t *testing.T) { // This resource is not controlled by anyone. return nil }), + MockDelete: test.NewMockDeleteFn(nil), }, args: args{ cr: &fake.Composite{ diff --git a/internal/controller/apiextensions/composite/composition_render.go b/internal/controller/apiextensions/composite/composition_render.go index f43c5c80a..ee7e2dd02 100644 --- a/internal/controller/apiextensions/composite/composition_render.go +++ b/internal/controller/apiextensions/composite/composition_render.go @@ -26,14 +26,14 @@ import ( "github.com/crossplane/crossplane/internal/xcrd" ) -// Error strings +// Error strings. 
const ( errUnmarshalJSON = "cannot unmarshal JSON data" errMarshalProtoStruct = "cannot marshal protobuf Struct to JSON" errSetControllerRef = "cannot set controller reference" - errFmtKindOrGroupChanged = "cannot change the kind or group of a composed resource from %s to %s (possible composed resource template mismatch)" - errFmtNamePrefixLabel = "cannot find top-level composite resource name label %q in composite resource metadata" + errFmtKindChanged = "cannot change the kind of a composed resource from %s to %s (possible composed resource template mismatch)" + errFmtNamePrefixLabel = "cannot find top-level composite resource name label %q in composite resource metadata" // TODO(negz): Include more detail such as field paths if they exist. // Perhaps require each patch type to have a String() method to help @@ -61,16 +61,18 @@ func RenderFromJSON(o resource.Object, data []byte) error { o.SetName(name) o.SetNamespace(namespace) - // This resource already had a GK (probably because it already exists), but + // This resource already had a Kind (probably because it already exists), but // when we rendered its template it changed. This shouldn't happen. Either - // someone changed the kind or group in the template, or we're trying to use the + // someone changed the kind in the template, or we're trying to use the // wrong template (e.g. because the order of an array of anonymous templates // changed). // Please note, we don't check for version changes, as versions can change. For example, // if a composed resource was created with a template that has a version of "v1alpha1", // and then the template is updated to "v1beta1", the composed resource will still be valid. - if !gvk.Empty() && o.GetObjectKind().GroupVersionKind().GroupKind() != gvk.GroupKind() { - return errors.Errorf(errFmtKindOrGroupChanged, gvk, o.GetObjectKind().GroupVersionKind()) + // We also don't check for group changes, as groups can change during + // migrations. 
+ if !gvk.Empty() && o.GetObjectKind().GroupVersionKind().Kind != gvk.Kind { + return errors.Errorf(errFmtKindChanged, gvk, o.GetObjectKind().GroupVersionKind()) } return nil diff --git a/internal/controller/apiextensions/composite/composition_render_test.go b/internal/controller/apiextensions/composite/composition_render_test.go index 59697114d..a2dd96f24 100644 --- a/internal/controller/apiextensions/composite/composition_render_test.go +++ b/internal/controller/apiextensions/composite/composition_render_test.go @@ -36,7 +36,7 @@ import ( ) func TestRenderFromJSON(t *testing.T) { - errInvalidChar := json.Unmarshal([]byte("olala"), &fake.Composed{}) + errInvalidChar := json.Unmarshal([]byte("olala"), &fake.Composed{}) //nolint:musttag // Not an issue in this test. type args struct { o resource.Object @@ -62,38 +62,37 @@ func TestRenderFromJSON(t *testing.T) { err: errors.Wrap(errInvalidChar, errUnmarshalJSON), }, }, - "ExistingGroupChanged": { - reason: "We should return an error if unmarshalling the base template changed the composed resource's group.", + "ExistingKindChanged": { + reason: "We should return an error if unmarshalling the base template changed the composed resource's kind.", args: args{ o: composed.New(composed.FromReference(corev1.ObjectReference{ APIVersion: "example.org/v1", Kind: "Potato", })), - data: []byte(`{"apiVersion": "foo.io/v1", "kind": "Potato"}`), + data: []byte(`{"apiVersion": "example.org/v1", "kind": "Different"}`), }, want: want{ o: composed.New(composed.FromReference(corev1.ObjectReference{ - APIVersion: "foo.io/v1", - Kind: "Potato", + APIVersion: "example.org/v1", + Kind: "Different", })), - err: errors.Errorf(errFmtKindOrGroupChanged, "example.org/v1, Kind=Potato", "foo.io/v1, Kind=Potato"), + err: errors.Errorf(errFmtKindChanged, "example.org/v1, Kind=Potato", "example.org/v1, Kind=Different"), }, }, - "ExistingKindChanged": { - reason: "We should return an error if unmarshalling the base template changed the composed 
resource's kind.", + "GroupCanChange": { + reason: "We should accept group changes in the base template.", args: args{ o: composed.New(composed.FromReference(corev1.ObjectReference{ APIVersion: "example.org/v1", Kind: "Potato", })), - data: []byte(`{"apiVersion": "example.org/v1", "kind": "Different"}`), + data: []byte(`{"apiVersion": "foo.io/v1", "kind": "Potato"}`), }, want: want{ o: composed.New(composed.FromReference(corev1.ObjectReference{ - APIVersion: "example.org/v1", - Kind: "Different", + APIVersion: "foo.io/v1", + Kind: "Potato", })), - err: errors.Errorf(errFmtKindOrGroupChanged, "example.org/v1, Kind=Potato", "example.org/v1, Kind=Different"), }, }, "VersionCanChange": { diff --git a/internal/controller/apiextensions/composite/composition_transforms.go b/internal/controller/apiextensions/composite/composition_transforms.go index 82b8e1291..62781536f 100644 --- a/internal/controller/apiextensions/composite/composition_transforms.go +++ b/internal/controller/apiextensions/composite/composition_transforms.go @@ -79,7 +79,7 @@ const ( ) // Resolve the supplied Transform. -func Resolve(t v1.Transform, input any) (any, error) { //nolint:gocyclo // This is a long but simple/same-y switch. +func Resolve(t v1.Transform, input any) (any, error) { var out any var err error @@ -156,7 +156,7 @@ func resolveMathMultiply(t v1.MathTransform, input any) (any, error) { // is not a number. depending on the type of clamp, the result will be either // the input or the clamp value, preserving their original types. func resolveMathClamp(t v1.MathTransform, input any) (any, error) { - in := int64(0) + var in int64 switch i := input.(type) { case int: in = int64(i) @@ -283,7 +283,7 @@ func unmarshalJSON(j extv1.JSON, output *any) error { } // ResolveString resolves a String transform. -func ResolveString(t v1.StringTransform, input any) (string, error) { //nolint:gocyclo // This is a long but simple/same-y switch. 
+func ResolveString(t v1.StringTransform, input any) (string, error) { switch t.Type { case v1.StringTransformTypeFormat: if t.Format == nil { @@ -419,7 +419,8 @@ func ResolveConvert(t v1.ConvertTransform, input any) (any, error) { if err != nil { return nil, err } - return f(input) + out, err := f(input) + return out, errors.Wrapf(err, "cannot convert value %s", input) } type conversionPair struct { @@ -454,66 +455,125 @@ func GetConversionFunc(t *v1.ConvertTransform, from v1.TransformIOType) (func(an // The unparam linter is complaining that these functions always return a nil // error, but we need this to be the case given some other functions in the map // may return an error. -var conversions = map[conversionPair]func(any) (any, error){ +var conversions = map[conversionPair]func(any) (any, error){ //nolint:gochecknoglobals // We treat this map as a constant. {from: v1.TransformIOTypeString, to: v1.TransformIOTypeInt64, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { - - return strconv.ParseInt(i.(string), 10, 64) + s, ok := i.(string) + if !ok { + return nil, errors.New("not a string") + } + return strconv.ParseInt(s, 10, 64) }, {from: v1.TransformIOTypeString, to: v1.TransformIOTypeBool, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { - return strconv.ParseBool(i.(string)) + s, ok := i.(string) + if !ok { + return nil, errors.New("not a string") + } + return strconv.ParseBool(s) }, {from: v1.TransformIOTypeString, to: v1.TransformIOTypeFloat64, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { - return strconv.ParseFloat(i.(string), 64) + s, ok := i.(string) + if !ok { + return nil, errors.New("not a string") + } + return strconv.ParseFloat(s, 64) }, {from: v1.TransformIOTypeString, to: v1.TransformIOTypeFloat64, format: v1.ConvertTransformFormatQuantity}: func(i any) (any, error) { - q, err := resource.ParseQuantity(i.(string)) + s, ok := i.(string) + if !ok { + return nil, errors.New("not a string") + } 
+ q, err := resource.ParseQuantity(s) if err != nil { return nil, err } return q.AsApproximateFloat64(), nil }, - {from: v1.TransformIOTypeInt64, to: v1.TransformIOTypeString, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { //nolint:unparam // See note above. - return strconv.FormatInt(i.(int64), 10), nil + {from: v1.TransformIOTypeInt64, to: v1.TransformIOTypeString, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { + i64, ok := i.(int64) + if !ok { + return nil, errors.New("not an int64") + } + return strconv.FormatInt(i64, 10), nil }, - {from: v1.TransformIOTypeInt64, to: v1.TransformIOTypeBool, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { //nolint:unparam // See note above. - return i.(int64) == 1, nil + {from: v1.TransformIOTypeInt64, to: v1.TransformIOTypeBool, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { + i64, ok := i.(int64) + if !ok { + return nil, errors.New("not an int64") + } + return i64 == 1, nil }, - {from: v1.TransformIOTypeInt64, to: v1.TransformIOTypeFloat64, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { //nolint:unparam // See note above. - return float64(i.(int64)), nil + {from: v1.TransformIOTypeInt64, to: v1.TransformIOTypeFloat64, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { + i64, ok := i.(int64) + if !ok { + return nil, errors.New("not an int64") + } + return float64(i64), nil }, - {from: v1.TransformIOTypeBool, to: v1.TransformIOTypeString, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { //nolint:unparam // See note above. 
- return strconv.FormatBool(i.(bool)), nil + {from: v1.TransformIOTypeBool, to: v1.TransformIOTypeString, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { + b, ok := i.(bool) + if !ok { + return nil, errors.New("not a bool") + } + return strconv.FormatBool(b), nil }, - {from: v1.TransformIOTypeBool, to: v1.TransformIOTypeInt64, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { //nolint:unparam // See note above. - if i.(bool) { + {from: v1.TransformIOTypeBool, to: v1.TransformIOTypeInt64, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { + b, ok := i.(bool) + if !ok { + return nil, errors.New("not a bool") + } + if b { return int64(1), nil } return int64(0), nil }, - {from: v1.TransformIOTypeBool, to: v1.TransformIOTypeFloat64, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { //nolint:unparam // See note above. - if i.(bool) { + {from: v1.TransformIOTypeBool, to: v1.TransformIOTypeFloat64, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { + b, ok := i.(bool) + if !ok { + return nil, errors.New("not a bool") + } + if b { return float64(1), nil } return float64(0), nil }, - {from: v1.TransformIOTypeFloat64, to: v1.TransformIOTypeString, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { //nolint:unparam // See note above. - return strconv.FormatFloat(i.(float64), 'f', -1, 64), nil + {from: v1.TransformIOTypeFloat64, to: v1.TransformIOTypeString, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { + f64, ok := i.(float64) + if !ok { + return nil, errors.New("not a float64") + } + return strconv.FormatFloat(f64, 'f', -1, 64), nil }, - {from: v1.TransformIOTypeFloat64, to: v1.TransformIOTypeInt64, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { //nolint:unparam // See note above. 
- return int64(i.(float64)), nil + {from: v1.TransformIOTypeFloat64, to: v1.TransformIOTypeInt64, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { + f64, ok := i.(float64) + if !ok { + return nil, errors.New("not a float64") + } + return int64(f64), nil }, - {from: v1.TransformIOTypeFloat64, to: v1.TransformIOTypeBool, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { //nolint:unparam // See note above. - return i.(float64) == float64(1), nil + {from: v1.TransformIOTypeFloat64, to: v1.TransformIOTypeBool, format: v1.ConvertTransformFormatNone}: func(i any) (any, error) { + f64, ok := i.(float64) + if !ok { + return nil, errors.New("not a float64") + } + return f64 == float64(1), nil }, {from: v1.TransformIOTypeString, to: v1.TransformIOTypeObject, format: v1.ConvertTransformFormatJSON}: func(i any) (any, error) { + s, ok := i.(string) + if !ok { + return nil, errors.New("not a string") + } o := map[string]any{} - return o, json.Unmarshal([]byte(i.(string)), &o) + return o, json.Unmarshal([]byte(s), &o) }, {from: v1.TransformIOTypeString, to: v1.TransformIOTypeArray, format: v1.ConvertTransformFormatJSON}: func(i any) (any, error) { + s, ok := i.(string) + if !ok { + return nil, errors.New("not a string") + } var o []any - return o, json.Unmarshal([]byte(i.(string)), &o) + return o, json.Unmarshal([]byte(s), &o) }, } diff --git a/internal/controller/apiextensions/composite/composition_transforms_test.go b/internal/controller/apiextensions/composite/composition_transforms_test.go index 609a46eb0..d00e22231 100644 --- a/internal/controller/apiextensions/composite/composition_transforms_test.go +++ b/internal/controller/apiextensions/composite/composition_transforms_test.go @@ -646,7 +646,6 @@ func TestMathResolve(t *testing.T) { } func TestStringResolve(t *testing.T) { - type args struct { stype v1.StringTransformType fmts *string @@ -1057,8 +1056,8 @@ func TestStringResolve(t *testing.T) { } for name, tc := range cases { t.Run(name, 
func(t *testing.T) { - - tr := v1.StringTransform{Type: tc.stype, + tr := v1.StringTransform{ + Type: tc.stype, Format: tc.fmts, Convert: tc.convert, Trim: tc.trim, @@ -1128,7 +1127,7 @@ func TestConvertResolve(t *testing.T) { format: (*v1.ConvertTransformFormat)(ptr.To(string(v1.ConvertTransformFormatQuantity))), }, want: want{ - err: resource.ErrFormatWrong, + err: errors.Wrap(resource.ErrFormatWrong, "cannot convert value 1000 blabla"), }, }, "SameTypeNoOp": { diff --git a/internal/controller/apiextensions/composite/connection.go b/internal/controller/apiextensions/composite/connection.go index 857e8cda9..fb8618e57 100644 --- a/internal/controller/apiextensions/composite/connection.go +++ b/internal/controller/apiextensions/composite/connection.go @@ -107,7 +107,7 @@ type SecretStoreConnectionPublisher struct { filter []string } -// NewSecretStoreConnectionPublisher returns a SecretStoreConnectionPublisher +// NewSecretStoreConnectionPublisher returns a SecretStoreConnectionPublisher. func NewSecretStoreConnectionPublisher(p managed.ConnectionPublisher, filter []string) *SecretStoreConnectionPublisher { return &SecretStoreConnectionPublisher{ publisher: p, @@ -196,7 +196,7 @@ func (fn ConnectionDetailsExtractorFn) ExtractConnection(cd resource.Composed, c // ExtractConnectionDetails extracts XR connection details from the supplied // composed resource. If no ExtractConfigs are supplied no connection details // will be returned. -func ExtractConnectionDetails(cd resource.Composed, data managed.ConnectionDetails, cfg ...ConnectionDetailExtractConfig) (managed.ConnectionDetails, error) { //nolint:gocyclo // TODO(negz): Break extraction out from validation, like we do with readiness. 
+func ExtractConnectionDetails(cd resource.Composed, data managed.ConnectionDetails, cfg ...ConnectionDetailExtractConfig) (managed.ConnectionDetails, error) { out := map[string][]byte{} for _, cfg := range cfg { if cfg.Name == "" { @@ -255,7 +255,7 @@ type ConnectionDetailExtractConfig struct { // connection secret of the composition instance. Name string - // FromConnectionDetailKey is the key that will be used to fetch the value + // FromConnectionSecretKey is the key that will be used to fetch the value // from the given target resource's connection details. FromConnectionSecretKey *string diff --git a/internal/controller/apiextensions/composite/connection_test.go b/internal/controller/apiextensions/composite/connection_test.go index 336120541..0acc3415b 100644 --- a/internal/controller/apiextensions/composite/connection_test.go +++ b/internal/controller/apiextensions/composite/connection_test.go @@ -177,7 +177,7 @@ func TestConnectionDetailsFetcherChain(t *testing.T) { "SingleFetcherChain": { reason: "A chain of one fetcher should return only its connection details.", c: ConnectionDetailsFetcherChain{ - ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return managed.ConnectionDetails{"a": []byte("b")}, nil }), }, @@ -191,7 +191,7 @@ func TestConnectionDetailsFetcherChain(t *testing.T) { "FetcherError": { reason: "We should return errors from a chained fetcher.", c: ConnectionDetailsFetcherChain{ - ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return nil, errBoom }), }, @@ -205,14 +205,14 @@ func TestConnectionDetailsFetcherChain(t *testing.T) { 
"MultipleFetcherChain": { reason: "A chain of multiple fetchers should return all of their connection details, with later fetchers winning if there are duplicates.", c: ConnectionDetailsFetcherChain{ - ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return managed.ConnectionDetails{ "a": []byte("a"), "b": []byte("b"), "c": []byte("c"), }, nil }), - ConnectionDetailsFetcherFn(func(ctx context.Context, o resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { + ConnectionDetailsFetcherFn(func(_ context.Context, _ resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) { return managed.ConnectionDetails{ "a": []byte("A"), }, nil @@ -456,7 +456,6 @@ func TestExtractConfigsFromTemplate(t *testing.T) { if diff := cmp.Diff(tc.want.cfgs, cfgs); diff != "" { t.Errorf("\n%s\nExtractConfigsFromTemplate(...): -want, +got:\n%s", tc.reason, diff) } - }) } } diff --git a/internal/controller/apiextensions/composite/environment_fetcher.go b/internal/controller/apiextensions/composite/environment_fetcher.go index 224404ec2..8a3d4c255 100644 --- a/internal/controller/apiextensions/composite/environment_fetcher.go +++ b/internal/controller/apiextensions/composite/environment_fetcher.go @@ -53,7 +53,7 @@ func (f *NilEnvironmentFetcher) Fetch(_ context.Context, _ EnvironmentFetcherReq return nil, nil } -// NewAPIEnvironmentFetcher creates a new APIEnvironmentFetcher +// NewAPIEnvironmentFetcher creates a new APIEnvironmentFetcher. 
func NewAPIEnvironmentFetcher(kube client.Client) *APIEnvironmentFetcher { return &APIEnvironmentFetcher{ kube: kube, diff --git a/internal/controller/apiextensions/composite/environment_fetcher_test.go b/internal/controller/apiextensions/composite/environment_fetcher_test.go index 05dc4864c..89215b6e7 100644 --- a/internal/controller/apiextensions/composite/environment_fetcher_test.go +++ b/internal/controller/apiextensions/composite/environment_fetcher_test.go @@ -212,7 +212,7 @@ func TestFetch(t *testing.T) { reason: "It should merge the data of multiple EnvironmentConfigs in the order they are listed.", args: args{ kube: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, o client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, o client.Object) error { cs := o.(*v1alpha1.EnvironmentConfig) switch key.Name { case "a": diff --git a/internal/controller/apiextensions/composite/environment_selector.go b/internal/controller/apiextensions/composite/environment_selector.go index c9a6a1f80..f56bd8898 100644 --- a/internal/controller/apiextensions/composite/environment_selector.go +++ b/internal/controller/apiextensions/composite/environment_selector.go @@ -60,7 +60,7 @@ func (s *NoopEnvironmentSelector) SelectEnvironment(_ context.Context, _ resourc return nil } -// NewAPIEnvironmentSelector creates a new APIEnvironmentSelector +// NewAPIEnvironmentSelector creates a new APIEnvironmentSelector. func NewAPIEnvironmentSelector(kube client.Client) *APIEnvironmentSelector { return &APIEnvironmentSelector{ kube: kube, @@ -135,7 +135,7 @@ func (s *APIEnvironmentSelector) lookUpConfigs(ctx context.Context, cr resource. 
return res, nil } -func (s *APIEnvironmentSelector) buildEnvironmentConfigRefFromSelector(cl *v1alpha1.EnvironmentConfigList, selector *v1.EnvironmentSourceSelector) ([]corev1.ObjectReference, error) { //nolint:gocyclo // TODO: refactor +func (s *APIEnvironmentSelector) buildEnvironmentConfigRefFromSelector(cl *v1alpha1.EnvironmentConfigList, selector *v1.EnvironmentSourceSelector) ([]corev1.ObjectReference, error) { ec := make([]v1alpha1.EnvironmentConfig, 0) if cl == nil { @@ -184,14 +184,14 @@ func (s *APIEnvironmentSelector) buildEnvironmentConfigRefFromSelector(cl *v1alp return envConfigs, nil } -func sortConfigs(ec []v1alpha1.EnvironmentConfig, f string) error { //nolint:gocyclo // TODO(phisco): refactor +func sortConfigs(ec []v1alpha1.EnvironmentConfig, f string) error { p := make([]struct { ec v1alpha1.EnvironmentConfig val any }, len(ec)) var valsKind reflect.Kind - for i := 0; i < len(ec); i++ { + for i := range len(ec) { m, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&ec[i]) if err != nil { return err @@ -231,11 +231,11 @@ func sortConfigs(ec []v1alpha1.EnvironmentConfig, f string) error { //nolint:goc vali, valj := p[i].val, p[j].val switch valsKind { //nolint:exhaustive // we only support these types case reflect.Float64: - return vali.(float64) < valj.(float64) + return vali.(float64) < valj.(float64) //nolint:forcetypeassert // Checked by reflect. case reflect.Int64: - return vali.(int64) < valj.(int64) + return vali.(int64) < valj.(int64) //nolint:forcetypeassert // Checked by reflect. case reflect.String: - return vali.(string) < valj.(string) + return vali.(string) < valj.(string) //nolint:forcetypeassert // Checked by reflect. 
default: // should never happen err = errors.Errorf(errFmtSortUnknownType, valsKind) diff --git a/internal/controller/apiextensions/composite/environment_selector_test.go b/internal/controller/apiextensions/composite/environment_selector_test.go index f9ec23755..ee869bd19 100644 --- a/internal/controller/apiextensions/composite/environment_selector_test.go +++ b/internal/controller/apiextensions/composite/environment_selector_test.go @@ -227,7 +227,7 @@ func TestSelect(t *testing.T) { reason: "It should create a name reference for the first selected EnvironmentConfig that matches the labels.", args: args{ kube: &test.MockClient{ - MockList: func(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error { + MockList: func(_ context.Context, obj client.ObjectList, opts ...client.ListOption) error { list := obj.(*v1alpha1.EnvironmentConfigList) match := opts[0].(client.MatchingLabels) if match["foo"] != "test-composite" { @@ -693,13 +693,11 @@ func TestSelect(t *testing.T) { APIVersion: v1alpha1.SchemeGroupVersion.String(), }, { - Name: "test-2", Kind: v1alpha1.EnvironmentConfigKind, APIVersion: v1alpha1.SchemeGroupVersion.String(), }, { - Name: "test-3", Kind: v1alpha1.EnvironmentConfigKind, APIVersion: v1alpha1.SchemeGroupVersion.String(), @@ -984,13 +982,11 @@ func TestSelect(t *testing.T) { APIVersion: v1alpha1.SchemeGroupVersion.String(), }, { - Name: "test-2", Kind: v1alpha1.EnvironmentConfigKind, APIVersion: v1alpha1.SchemeGroupVersion.String(), }, { - Name: "test-3", Kind: v1alpha1.EnvironmentConfigKind, APIVersion: v1alpha1.SchemeGroupVersion.String(), diff --git a/internal/controller/apiextensions/composite/extra_resources.go b/internal/controller/apiextensions/composite/extra_resources.go new file mode 100644 index 000000000..12ed83f17 --- /dev/null +++ b/internal/controller/apiextensions/composite/extra_resources.go @@ -0,0 +1,159 @@ +/* +Copyright 2024 The Crossplane Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); you may not use +this file except in compliance with the License. You may obtain a copy of the +License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed +under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. +*/ + +package composite + +import ( + "context" + "reflect" + + kerrors "k8s.io/apimachinery/pkg/api/errors" + kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crossplane/crossplane-runtime/pkg/errors" + + fnv1 "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1" +) + +// MaxRequirementsIterations is the maximum number of times a Function should be +// called, limiting the number of times it can request for extra resources, +// capped for safety. +const MaxRequirementsIterations = 5 + +// A FetchingFunctionRunner wraps an underlying FunctionRunner, adding support +// for fetching any extra resources requested by the function it runs. +type FetchingFunctionRunner struct { + wrapped FunctionRunner + resources ExtraResourcesFetcher +} + +// NewFetchingFunctionRunner returns a FunctionRunner that supports fetching +// extra resources. +func NewFetchingFunctionRunner(r FunctionRunner, f ExtraResourcesFetcher) *FetchingFunctionRunner { + return &FetchingFunctionRunner{wrapped: r, resources: f} +} + +// RunFunction runs a function, repeatedly fetching any extra resources it asks +// for. The function may be run up to MaxRequirementsIterations times. 
+func (c *FetchingFunctionRunner) RunFunction(ctx context.Context, name string, req *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) { + // Used to store the requirements returned at the previous iteration. + var requirements *fnv1.Requirements + + for i := int64(0); i <= MaxRequirementsIterations; i++ { + rsp, err := c.wrapped.RunFunction(ctx, name, req) + if err != nil { + // I can't think of any useful info to wrap this error with. + return nil, err + } + + for _, rs := range rsp.GetResults() { + if rs.GetSeverity() == fnv1.Severity_SEVERITY_FATAL { + // We won't iterate if the function returned a fatal result. + return rsp, nil + } + } + + newRequirements := rsp.GetRequirements() + if reflect.DeepEqual(newRequirements, requirements) { + // The requirements stabilized, the function is done. + return rsp, nil + } + + // Store the requirements for the next iteration. + requirements = newRequirements + + // Cleanup the extra resources from the previous iteration to store the new ones + req.ExtraResources = make(map[string]*fnv1.Resources) + + // Fetch the requested resources and add them to the desired state. + for name, selector := range newRequirements.GetExtraResources() { + resources, err := c.resources.Fetch(ctx, selector) + if err != nil { + return nil, errors.Wrapf(err, "fetching resources for %s", name) + } + + // Resources would be nil in case of not found resources. + req.ExtraResources[name] = resources + } + + // Pass down the updated context across iterations. + req.Context = rsp.GetContext() + } + // The requirements didn't stabilize after the maximum number of iterations. + return nil, errors.Errorf("requirements didn't stabilize after the maximum number of iterations (%d)", MaxRequirementsIterations) +} + +// ExistingExtraResourcesFetcher fetches extra resources requested by +// functions using the provided client.Reader. 
+type ExistingExtraResourcesFetcher struct { + client client.Reader +} + +// NewExistingExtraResourcesFetcher returns a new ExistingExtraResourcesFetcher. +func NewExistingExtraResourcesFetcher(c client.Reader) *ExistingExtraResourcesFetcher { + return &ExistingExtraResourcesFetcher{client: c} +} + +// Fetch fetches resources requested by functions using the provided client.Reader. +func (e *ExistingExtraResourcesFetcher) Fetch(ctx context.Context, rs *fnv1.ResourceSelector) (*fnv1.Resources, error) { + if rs == nil { + return nil, errors.New(errNilResourceSelector) + } + switch match := rs.GetMatch().(type) { + case *fnv1.ResourceSelector_MatchName: + // Fetch a single resource. + r := &kunstructured.Unstructured{} + r.SetAPIVersion(rs.GetApiVersion()) + r.SetKind(rs.GetKind()) + nn := types.NamespacedName{Name: rs.GetMatchName()} + err := e.client.Get(ctx, nn, r) + if kerrors.IsNotFound(err) { + // The resource doesn't exist. We'll return nil, which the Functions + // know means that the resource was not found. + return nil, nil + } + if err != nil { + return nil, errors.Wrap(err, errGetExtraResourceByName) + } + o, err := AsStruct(r) + if err != nil { + return nil, errors.Wrap(err, errExtraResourceAsStruct) + } + return &fnv1.Resources{Items: []*fnv1.Resource{{Resource: o}}}, nil + case *fnv1.ResourceSelector_MatchLabels: + // Fetch a list of resources. 
+ list := &kunstructured.UnstructuredList{} + list.SetAPIVersion(rs.GetApiVersion()) + list.SetKind(rs.GetKind()) + + if err := e.client.List(ctx, list, client.MatchingLabels(match.MatchLabels.GetLabels())); err != nil { + return nil, errors.Wrap(err, errListExtraResources) + } + + resources := make([]*fnv1.Resource, len(list.Items)) + for i, r := range list.Items { + o, err := AsStruct(&r) + if err != nil { + return nil, errors.Wrap(err, errExtraResourceAsStruct) + } + resources[i] = &fnv1.Resource{Resource: o} + } + + return &fnv1.Resources{Items: resources}, nil + } + return nil, errors.New(errUnknownResourceSelector) +} diff --git a/internal/controller/apiextensions/composite/extra_resources_test.go b/internal/controller/apiextensions/composite/extra_resources_test.go new file mode 100644 index 000000000..1b955136b --- /dev/null +++ b/internal/controller/apiextensions/composite/extra_resources_test.go @@ -0,0 +1,468 @@ +/* +Copyright 2024 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); you may not use +this file except in compliance with the License. You may obtain a copy of the +License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed +under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. 
+*/ + +package composite + +import ( + "context" + "fmt" + "math/rand" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/protobuf/testing/protocmp" + kerrors "k8s.io/apimachinery/pkg/api/errors" + kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crossplane/crossplane-runtime/pkg/errors" + "github.com/crossplane/crossplane-runtime/pkg/test" + + fnv1 "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1" +) + +var _ FunctionRunner = &FetchingFunctionRunner{} + +func TestExistingExtraResourcesFetcherFetch(t *testing.T) { + errBoom := errors.New("boom") + + type args struct { + rs *fnv1.ResourceSelector + c client.Reader + } + type want struct { + res *fnv1.Resources + err error + } + cases := map[string]struct { + reason string + args args + want want + }{ + "SuccessMatchName": { + reason: "We should return a valid Resources when a resource is found by name", + args: args{ + rs: &fnv1.ResourceSelector{ + ApiVersion: "test.crossplane.io/v1", + Kind: "Foo", + Match: &fnv1.ResourceSelector_MatchName{ + MatchName: "cool-resource", + }, + }, + c: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + obj.SetName("cool-resource") + return nil + }), + }, + }, + want: want{ + res: &fnv1.Resources{ + Items: []*fnv1.Resource{ + { + Resource: MustStruct(map[string]any{ + "apiVersion": "test.crossplane.io/v1", + "kind": "Foo", + "metadata": map[string]any{ + "name": "cool-resource", + }, + }), + }, + }, + }, + }, + }, + "SuccessMatchLabels": { + reason: "We should return a valid Resources when a resource is found by labels", + args: args{ + rs: &fnv1.ResourceSelector{ + ApiVersion: "test.crossplane.io/v1", + Kind: "Foo", + Match: &fnv1.ResourceSelector_MatchLabels{ + MatchLabels: &fnv1.MatchLabels{ + Labels: map[string]string{ + "cool": "resource", + }, + }, + 
}, + }, + c: &test.MockClient{ + MockList: test.NewMockListFn(nil, func(obj client.ObjectList) error { + obj.(*kunstructured.UnstructuredList).Items = []kunstructured.Unstructured{ + { + Object: map[string]interface{}{ + "apiVersion": "test.crossplane.io/v1", + "kind": "Foo", + "metadata": map[string]interface{}{ + "name": "cool-resource", + "labels": map[string]interface{}{ + "cool": "resource", + }, + }, + }, + }, + { + Object: map[string]interface{}{ + "apiVersion": "test.crossplane.io/v1", + "kind": "Foo", + "metadata": map[string]interface{}{ + "name": "cooler-resource", + "labels": map[string]interface{}{ + "cool": "resource", + }, + }, + }, + }, + } + return nil + }), + }, + }, + want: want{ + res: &fnv1.Resources{ + Items: []*fnv1.Resource{ + { + Resource: MustStruct(map[string]any{ + "apiVersion": "test.crossplane.io/v1", + "kind": "Foo", + "metadata": map[string]any{ + "name": "cool-resource", + "labels": map[string]any{ + "cool": "resource", + }, + }, + }), + }, + { + Resource: MustStruct(map[string]any{ + "apiVersion": "test.crossplane.io/v1", + "kind": "Foo", + "metadata": map[string]any{ + "name": "cooler-resource", + "labels": map[string]any{ + "cool": "resource", + }, + }, + }), + }, + }, + }, + }, + }, + "NotFoundMatchName": { + reason: "We should return no error when a resource is not found by name", + args: args{ + rs: &fnv1.ResourceSelector{ + ApiVersion: "test.crossplane.io/v1", + Kind: "Foo", + Match: &fnv1.ResourceSelector_MatchName{ + MatchName: "cool-resource", + }, + }, + c: &test.MockClient{ + MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{Resource: "Foo"}, "cool-resource")), + }, + }, + want: want{ + res: nil, + err: nil, + }, + }, + // NOTE(phisco): No NotFound error is returned when listing resources by labels, so there is no NotFoundMatchLabels test case. 
+ "ErrorMatchName": { + reason: "We should return any other error encountered when getting a resource by name", + args: args{ + rs: &fnv1.ResourceSelector{ + ApiVersion: "test.crossplane.io/v1", + Kind: "Foo", + Match: &fnv1.ResourceSelector_MatchName{ + MatchName: "cool-resource", + }, + }, + c: &test.MockClient{ + MockGet: test.NewMockGetFn(errBoom), + }, + }, + want: want{ + res: nil, + err: errBoom, + }, + }, + "ErrorMatchLabels": { + reason: "We should return any other error encountered when listing resources by labels", + args: args{ + rs: &fnv1.ResourceSelector{ + ApiVersion: "test.crossplane.io/v1", + Kind: "Foo", + Match: &fnv1.ResourceSelector_MatchLabels{ + MatchLabels: &fnv1.MatchLabels{ + Labels: map[string]string{ + "cool": "resource", + }, + }, + }, + }, + c: &test.MockClient{ + MockList: test.NewMockListFn(errBoom), + }, + }, + want: want{ + res: nil, + err: errBoom, + }, + }, + } + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + g := NewExistingExtraResourcesFetcher(tc.args.c) + res, err := g.Fetch(context.Background(), tc.args.rs) + if diff := cmp.Diff(tc.want.err, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\nGet(...): -want, +got:\n%s", tc.reason, diff) + } + if diff := cmp.Diff(tc.want.res, res, protocmp.Transform()); diff != "" { + t.Errorf("\n%s\nGet(...): -want, +got:\n%s", tc.reason, diff) + } + }) + } +} + +func TestFetchingFunctionRunner(t *testing.T) { + coolResource := MustStruct(map[string]any{ + "apiVersion": "test.crossplane.io/v1", + "Kind": "CoolResource", + "metadata": map[string]any{ + "name": "pretty-cool", + }, + }) + + // Used in the Success test + called := false + + type params struct { + wrapped FunctionRunner + resources ExtraResourcesFetcher + } + type args struct { + ctx context.Context + name string + req *fnv1.RunFunctionRequest + } + type want struct { + rsp *fnv1.RunFunctionResponse + err error + } + + cases := map[string]struct { + reason string + params params + args args + want 
want + }{ + "RunFunctionError": { + reason: "We should return an error if the wrapped FunctionRunner does", + params: params{ + wrapped: FunctionRunnerFn(func(_ context.Context, _ string, _ *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) { + return nil, errors.New("boom") + }), + }, + args: args{}, + want: want{ + err: cmpopts.AnyError, + }, + }, + "FatalResult": { + reason: "We should return early if the function returns a fatal result", + params: params{ + wrapped: FunctionRunnerFn(func(_ context.Context, _ string, _ *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) { + rsp := &fnv1.RunFunctionResponse{ + Results: []*fnv1.Result{ + { + Severity: fnv1.Severity_SEVERITY_FATAL, + }, + }, + } + return rsp, nil + }), + }, + args: args{}, + want: want{ + rsp: &fnv1.RunFunctionResponse{ + Results: []*fnv1.Result{ + { + Severity: fnv1.Severity_SEVERITY_FATAL, + }, + }, + }, + err: nil, + }, + }, + "NoRequirements": { + reason: "We should return the response unchanged if there are no requirements", + params: params{ + wrapped: FunctionRunnerFn(func(_ context.Context, _ string, _ *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) { + rsp := &fnv1.RunFunctionResponse{ + Results: []*fnv1.Result{ + { + Severity: fnv1.Severity_SEVERITY_NORMAL, + }, + }, + } + return rsp, nil + }), + }, + args: args{}, + want: want{ + rsp: &fnv1.RunFunctionResponse{ + Results: []*fnv1.Result{ + { + Severity: fnv1.Severity_SEVERITY_NORMAL, + }, + }, + }, + err: nil, + }, + }, + "FetchResourcesError": { + reason: "We should return any error encountered when fetching extra resources", + params: params{ + wrapped: FunctionRunnerFn(func(_ context.Context, _ string, _ *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) { + rsp := &fnv1.RunFunctionResponse{ + Requirements: &fnv1.Requirements{ + ExtraResources: map[string]*fnv1.ResourceSelector{ + "gimme": { + ApiVersion: "test.crossplane.io/v1", + Kind: "CoolResource", + }, + }, + }, + } + return rsp, nil + 
}), + resources: ExtraResourcesFetcherFn(func(_ context.Context, _ *fnv1.ResourceSelector) (*fnv1.Resources, error) { + return nil, errors.New("boom") + }), + }, + args: args{ + req: &fnv1.RunFunctionRequest{}, + }, + want: want{ + err: cmpopts.AnyError, + }, + }, + "RequirementsDidntStabilizeError": { + reason: "We should return an error if the function's requirements never stabilize", + params: params{ + wrapped: FunctionRunnerFn(func(_ context.Context, _ string, _ *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) { + rsp := &fnv1.RunFunctionResponse{ + Requirements: &fnv1.Requirements{ + ExtraResources: map[string]*fnv1.ResourceSelector{ + "gimme": { + ApiVersion: "test.crossplane.io/v1", + + // What are the chances we get the same number 5 times in a row? + Kind: fmt.Sprintf("CoolResource%d", rand.Int31()), + }, + }, + }, + } + return rsp, nil + }), + resources: ExtraResourcesFetcherFn(func(_ context.Context, _ *fnv1.ResourceSelector) (*fnv1.Resources, error) { + return &fnv1.Resources{}, nil + }), + }, + args: args{ + req: &fnv1.RunFunctionRequest{}, + }, + want: want{ + err: cmpopts.AnyError, + }, + }, + "Success": { + reason: "We should return the fetched resources", + params: params{ + wrapped: FunctionRunnerFn(func(_ context.Context, _ string, req *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) { + // We only expect to be sent extra resources the second time + // we're called, in response to our requirements. 
+ if called { + want := &fnv1.RunFunctionRequest{ + ExtraResources: map[string]*fnv1.Resources{ + "gimme": { + Items: []*fnv1.Resource{{Resource: coolResource}}, + }, + }, + } + + if diff := cmp.Diff(want, req, protocmp.Transform()); diff != "" { + t.Errorf("RunFunction(): -want, +got:\n%s", diff) + return nil, errors.New("unexpected RunFunctionRequest") + } + } + + called = true + + rsp := &fnv1.RunFunctionResponse{ + Requirements: &fnv1.Requirements{ + ExtraResources: map[string]*fnv1.ResourceSelector{ + "gimme": { + ApiVersion: "test.crossplane.io/v1", + Kind: "CoolResource", + }, + }, + }, + } + return rsp, nil + }), + resources: ExtraResourcesFetcherFn(func(_ context.Context, _ *fnv1.ResourceSelector) (*fnv1.Resources, error) { + r := &fnv1.Resources{ + Items: []*fnv1.Resource{{Resource: coolResource}}, + } + return r, nil + }), + }, + args: args{ + req: &fnv1.RunFunctionRequest{}, + }, + want: want{ + rsp: &fnv1.RunFunctionResponse{ + Requirements: &fnv1.Requirements{ + ExtraResources: map[string]*fnv1.ResourceSelector{ + "gimme": { + ApiVersion: "test.crossplane.io/v1", + Kind: "CoolResource", + }, + }, + }, + }, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + r := NewFetchingFunctionRunner(tc.params.wrapped, tc.params.resources) + rsp, err := r.RunFunction(tc.args.ctx, tc.args.name, tc.args.req) + + if diff := cmp.Diff(tc.want.err, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\nr.RunFunction(...): -want, +got:\n%s", tc.reason, diff) + } + if diff := cmp.Diff(tc.want.rsp, rsp, protocmp.Transform()); diff != "" { + t.Errorf("\n%s\nr.RunFunction(...): -want, +got:\n%s", tc.reason, diff) + } + }) + } +} diff --git a/internal/controller/apiextensions/composite/fuzz_test.go b/internal/controller/apiextensions/composite/fuzz_test.go index b5a7270d3..b90c689ce 100644 --- a/internal/controller/apiextensions/composite/fuzz_test.go +++ b/internal/controller/apiextensions/composite/fuzz_test.go @@ -32,9 +32,7 @@ import 
( pkgmetav1alpha1 "github.com/crossplane/crossplane/apis/pkg/meta/v1alpha1" ) -var ( - fuzzScheme = runtime.NewScheme() -) +var fuzzScheme = runtime.NewScheme() func init() { if err := pkgmetav1alpha1.SchemeBuilder.AddToScheme(fuzzScheme); err != nil { @@ -48,7 +46,7 @@ func init() { } } -// Adds a type to the patch +// Adds a type to the patch. func addType(p *v1.Patch, i int) { chooseType := i % 5 switch chooseType { @@ -66,7 +64,7 @@ func addType(p *v1.Patch, i int) { } func FuzzPatchApply(f *testing.F) { - f.Fuzz(func(t *testing.T, data []byte) { + f.Fuzz(func(_ *testing.T, data []byte) { f := fuzz.NewConsumer(data) cp := &fake.Composite{} @@ -88,7 +86,7 @@ func FuzzPatchApply(f *testing.F) { }) } -// Adds a type to the transform +// Adds a type to the transform. func addTransformType(t *v1.Transform, i int) error { chooseType := i % 4 switch chooseType { @@ -122,7 +120,7 @@ func addTransformType(t *v1.Transform, i int) error { } func FuzzTransform(f *testing.F) { - f.Fuzz(func(tt *testing.T, data []byte) { + f.Fuzz(func(_ *testing.T, data []byte) { f := fuzz.NewConsumer(data) t := &v1.Transform{} diff --git a/internal/controller/apiextensions/composite/ready.go b/internal/controller/apiextensions/composite/ready.go index c5744c720..13284179a 100644 --- a/internal/controller/apiextensions/composite/ready.go +++ b/internal/controller/apiextensions/composite/ready.go @@ -30,7 +30,7 @@ import ( v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" ) -// Error strings +// Error strings. const ( errInvalidCheck = "invalid" errPaveObject = "cannot lookup field paths in supplied object" @@ -60,7 +60,7 @@ const ( ) // ReadinessCheck is used to indicate how to tell whether a resource is ready -// for consumption +// for consumption. type ReadinessCheck struct { // Type indicates the type of probe you'd like to use. 
Type ReadinessCheckType @@ -79,7 +79,7 @@ type ReadinessCheck struct { } // MatchConditionReadinessCheck is used to indicate how to tell whether a resource is ready -// for consumption +// for consumption. type MatchConditionReadinessCheck struct { // Type indicates the type of condition you'd like to use. Type xpv1.ConditionType @@ -168,8 +168,6 @@ func (c ReadinessCheck) Validate() error { } // IsReady runs the readiness check against the supplied object. -// -//nolint:gocyclo // just a switch func (c ReadinessCheck) IsReady(p *fieldpath.Paved, o ConditionedObject) (bool, error) { if err := c.Validate(); err != nil { return false, errors.Wrap(err, errInvalidCheck) diff --git a/internal/controller/apiextensions/composite/reconciler.go b/internal/controller/apiextensions/composite/reconciler.go index a3e9b6f51..64d36b373 100644 --- a/internal/controller/apiextensions/composite/reconciler.go +++ b/internal/controller/apiextensions/composite/reconciler.go @@ -20,18 +20,17 @@ package composite import ( "context" "fmt" + "math/rand" "strconv" "time" + corev1 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/client" - runtimeevent "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" @@ -41,10 +40,12 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/meta" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" - "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured" + 
"github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/claim" + "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composed" "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" + "github.com/crossplane/crossplane/internal/engine" ) const ( @@ -53,7 +54,7 @@ const ( finalizer = "composite.apiextensions.crossplane.io" ) -// Error strings +// Error strings. const ( errGet = "cannot get composite resource" errUpdate = "cannot update composite resource" @@ -73,6 +74,9 @@ const ( errCompose = "cannot compose resources" errInvalidResources = "some resources were invalid, check events" errRenderCD = "cannot render composed resource" + errSyncResources = "cannot sync composed resources" + errGetClaim = "cannot get referenced claim" + errParseClaimRef = "cannot parse claim reference" reconcilePausedMsg = "Reconciliation (including deletion) is paused via the pause annotation" ) @@ -87,6 +91,11 @@ const ( reasonPaused event.Reason = "ReconciliationPaused" ) +// Condition reasons. +const ( + reasonFatalError xpv1.ConditionReason = "FatalError" +) + // ControllerName returns the recommended name for controllers that use this // package to reconcile a particular kind of composite resource. func ControllerName(name string) string { @@ -186,7 +195,48 @@ type CompositionRequest struct { type CompositionResult struct { Composed []ComposedResource ConnectionDetails managed.ConnectionDetails - Events []event.Event + Events []TargetedEvent + Conditions []TargetedCondition +} + +// A CompositionTarget is the target of a composition event or condition. +type CompositionTarget string + +// Composition event and condition targets. +const ( + CompositionTargetComposite CompositionTarget = "Composite" + CompositionTargetCompositeAndClaim CompositionTarget = "CompositeAndClaim" +) + +// A TargetedEvent represents an event produced by the composition process. 
It +// can target either the XR only, or both the XR and the claim. +type TargetedEvent struct { + event.Event + Target CompositionTarget + // Detail about the event to be included in the composite resource event but + // not the claim. + Detail string +} + +// AsEvent produces the base event. +func (e *TargetedEvent) AsEvent() event.Event { + return event.Event{Type: e.Type, Reason: e.Reason, Message: e.Message, Annotations: e.Annotations} +} + +// AsDetailedEvent produces an event with additional detail if available. +func (e *TargetedEvent) AsDetailedEvent() event.Event { + if e.Detail == "" { + return e.AsEvent() + } + msg := fmt.Sprintf("%s: %s", e.Detail, e.Message) + return event.Event{Type: e.Type, Reason: e.Reason, Message: msg, Annotations: e.Annotations} +} + +// A TargetedCondition represents a condition produced by the composition +// process. It can target either the XR only, or both the XR and the claim. +type TargetedCondition struct { + xpv1.Condition + Target CompositionTarget } // A Composer composes (i.e. creates, updates, or deletes) resources given the @@ -228,23 +278,29 @@ func WithRecorder(er event.Recorder) ReconcilerOption { } } -// WithPollInterval specifies how long the Reconciler should wait before queueing -// a new reconciliation after a successful reconcile. The Reconciler requeues -// after a specified duration when it is not actively waiting for an external -// operation, but wishes to check whether resources it does not have a watch on -// (i.e. composed resources) need to be reconciled. -func WithPollInterval(after time.Duration) ReconcilerOption { +// A PollIntervalHook determines how frequently the XR should poll its composed +// resources. +type PollIntervalHook func(ctx context.Context, xr *composite.Unstructured) time.Duration + +// WithPollIntervalHook specifies how to determine how long the Reconciler +// should wait before queueing a new reconciliation after a successful +// reconcile. 
+func WithPollIntervalHook(h PollIntervalHook) ReconcilerOption { return func(r *Reconciler) { - r.pollInterval = after + r.pollInterval = h } } -// WithClient specifies how the Reconciler should interact with the Kubernetes -// API. -func WithClient(c client.Client) ReconcilerOption { - return func(r *Reconciler) { - r.client = c - } +// WithPollInterval specifies how long the Reconciler should wait before +// queueing a new reconciliation after a successful reconcile. The Reconciler +// uses the interval jittered +/- 10% when all composed resources are ready. It +// polls twice as frequently (i.e. at half the supplied interval) +/- 10% when +// waiting for composed resources to become ready. +func WithPollInterval(interval time.Duration) ReconcilerOption { + return WithPollIntervalHook(func(_ context.Context, _ *composite.Unstructured) time.Duration { + // Jitter the poll interval +/- 10%. + return interval + time.Duration((rand.Float64()-0.5)*2*(float64(interval)*0.1)) //nolint:gosec // No need for secure randomness + }) } // WithCompositionRevisionFetcher specifies how the composition to be used should be @@ -320,11 +376,13 @@ func WithComposer(c Composer) ReconcilerOption { } } -// WithKindObserver specifies how the Reconciler should observe kinds for -// realtime events. -func WithKindObserver(o KindObserver) ReconcilerOption { +// WithWatchStarter specifies how the Reconciler should start watches for any +// resources it composes. +func WithWatchStarter(controllerName string, h handler.EventHandler, w WatchStarter) ReconcilerOption { return func(r *Reconciler) { - r.kindObserver = o + r.controllerName = controllerName + r.watchHandler = h + r.engine = w } } @@ -335,7 +393,7 @@ type revision struct { // A CompositionRevisionValidator validates the supplied CompositionRevision. 
type CompositionRevisionValidator interface { - Validate(*v1.CompositionRevision) error + Validate(rev *v1.CompositionRevision) error } // A CompositionRevisionValidatorFn is a function that validates a @@ -347,6 +405,27 @@ func (fn CompositionRevisionValidatorFn) Validate(c *v1.CompositionRevision) err return fn(c) } +// A WatchStarter can start a new watch. XR controllers use this to dynamically +// start watches when they compose new kinds of resources. +type WatchStarter interface { + // StartWatches starts the supplied watches, if they're not running already. + StartWatches(name string, ws ...engine.Watch) error +} + +// A NopWatchStarter does nothing. +type NopWatchStarter struct{} + +// StartWatches does nothing. +func (n *NopWatchStarter) StartWatches(_ string, _ ...engine.Watch) error { return nil } + +// A WatchStarterFn is a function that can start a new watch. +type WatchStarterFn func(name string, ws ...engine.Watch) error + +// StartWatches starts the supplied watches, if they're not running already. +func (fn WatchStarterFn) StartWatches(name string, ws ...engine.Watch) error { + return fn(name, ws...) +} + type environment struct { EnvironmentFetcher } @@ -359,34 +438,15 @@ type compositeResource struct { managed.ConnectionPublisher } -// KindObserver tracks kinds of referenced composed resources in composite -// resources in order to start watches for them for realtime events. -type KindObserver interface { - // WatchComposedResources starts a watch of the given kinds to trigger reconciles when - // a referenced object of those kinds changes. - WatchComposedResources(kind ...schema.GroupVersionKind) -} - -// KindObserverFunc implements KindObserver as a function. -type KindObserverFunc func(kind ...schema.GroupVersionKind) - -// WatchComposedResources starts a watch of the given kinds to trigger reconciles when -// a referenced object of those kinds changes. 
-func (fn KindObserverFunc) WatchComposedResources(kind ...schema.GroupVersionKind) { - fn(kind...) -} - // NewReconciler returns a new Reconciler of composite resources. -func NewReconciler(mgr manager.Manager, of resource.CompositeKind, opts ...ReconcilerOption) *Reconciler { - kube := unstructured.NewClient(mgr.GetClient()) - +func NewReconciler(c client.Client, of resource.CompositeKind, opts ...ReconcilerOption) *Reconciler { r := &Reconciler{ - client: kube, + client: c, gvk: schema.GroupVersionKind(of), revision: revision{ - CompositionRevisionFetcher: NewAPIRevisionFetcher(resource.ClientApplicator{Client: kube, Applicator: resource.NewAPIPatchingApplicator(kube)}), + CompositionRevisionFetcher: NewAPIRevisionFetcher(resource.ClientApplicator{Client: c, Applicator: resource.NewAPIPatchingApplicator(c)}), CompositionRevisionValidator: CompositionRevisionValidatorFn(func(rev *v1.CompositionRevision) error { // TODO(negz): Presumably this validation will eventually be // removed in favor of the new Composition validation @@ -405,23 +465,26 @@ func NewReconciler(mgr manager.Manager, of resource.CompositeKind, opts ...Recon }, composite: compositeResource{ - Finalizer: resource.NewAPIFinalizer(kube, finalizer), - CompositionSelector: NewAPILabelSelectorResolver(kube), + Finalizer: resource.NewAPIFinalizer(c, finalizer), + CompositionSelector: NewAPILabelSelectorResolver(c), EnvironmentSelector: NewNoopEnvironmentSelector(), - Configurator: NewConfiguratorChain(NewAPINamingConfigurator(kube), NewAPIConfigurator(kube)), + Configurator: NewConfiguratorChain(NewAPINamingConfigurator(c), NewAPIConfigurator(c)), // TODO(negz): In practice this is a filtered publisher that will // never filter any keys. Is there an unfiltered variant we could // use by default instead? 
- ConnectionPublisher: NewAPIFilteredSecretPublisher(kube, []string{}), + ConnectionPublisher: NewAPIFilteredSecretPublisher(c, []string{}), }, - resource: NewPTComposer(kube), + resource: NewPTComposer(c), + + // Dynamic watches are disabled by default. + engine: &NopWatchStarter{}, log: logging.NewNopLogger(), record: event.NewNopRecorder(), - pollInterval: defaultPollInterval, + pollInterval: func(_ context.Context, _ *composite.Unstructured) time.Duration { return defaultPollInterval }, } for _, f := range opts { @@ -442,17 +505,21 @@ type Reconciler struct { revision revision composite compositeResource - resource Composer - kindObserver KindObserver + resource Composer + + // Used to dynamically start composed resource watches. + controllerName string + engine WatchStarter + watchHandler handler.EventHandler log logging.Logger record event.Recorder - pollInterval time.Duration + pollInterval PollIntervalHook } // Reconcile a composite resource. -func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocyclo // Reconcile methods are often very complex. Be wary. +func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocognit // Reconcile methods are often very complex. Be wary. log := r.log.WithValues("request", req) log.Debug("Reconciling") @@ -592,6 +659,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco if kerrors.IsConflict(err) { return reconcile.Result{Requeue: true}, nil } + err = errors.Wrap(err, errCompose) r.record.Event(xr, event.Warning(reasonCompose, err)) if kerrors.IsInvalid(err) { @@ -605,15 +673,36 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco err = errors.Wrap(errors.New(errInvalidResources), errCompose) } xr.SetConditions(xpv1.ReconcileError(err)) + + meta := r.handleCommonCompositionResult(ctx, res, xr) + // We encountered a fatal error. 
For any custom status conditions that were + // not received due to the fatal error, mark them as unknown. + for _, c := range xr.GetConditions() { + if xpv1.IsSystemConditionType(c.Type) { + continue + } + if !meta.conditionTypesSeen[c.Type] { + c.Status = corev1.ConditionUnknown + c.Reason = reasonFatalError + c.Message = "A fatal error occurred before the status of this condition could be determined." + xr.SetConditions(c) + } + } + return reconcile.Result{Requeue: true}, errors.Wrap(r.client.Status().Update(ctx, xr), errUpdateStatus) } - if r.kindObserver != nil { - var gvks []schema.GroupVersionKind - for _, ref := range xr.GetResourceReferences() { - gvks = append(gvks, ref.GroupVersionKind()) - } - r.kindObserver.WatchComposedResources(gvks...) + ws := make([]engine.Watch, len(xr.GetResourceReferences())) + for i, ref := range xr.GetResourceReferences() { + ws[i] = engine.WatchFor(composed.New(composed.FromReference(ref)), engine.WatchTypeComposedResource, r.watchHandler) + } + + // StartWatches is a no-op unless the realtime compositions feature flag is + // enabled. When the flag is enabled, the ControllerEngine that starts this + // controller also starts a garbage collector for its watches. + if err := r.engine.StartWatches(r.controllerName, ws...); err != nil { + // TODO(negz): If we stop polling this will be a more serious error. + log.Debug("Cannot start watches for composed resources. 
Relying on polling to know when they change.", "controller-name", r.controllerName, "error", err) } published, err := r.composite.PublishConnection(ctx, xr, res.ConnectionDetails) @@ -633,16 +722,9 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco r.record.Event(xr, event.Normal(reasonPublish, "Successfully published connection details")) } - warnings := 0 - for _, e := range res.Events { - if e.Type == event.TypeWarning { - warnings++ - } - log.Debug(e.Message) - r.record.Event(xr, e) - } + meta := r.handleCommonCompositionResult(ctx, res, xr) - if warnings == 0 { + if meta.numWarningEvents == 0 { // We don't consider warnings severe enough to prevent the XR from being // considered synced (i.e. severe enough to return a ReconcileError) but // they are severe enough that we probably shouldn't say we successfully @@ -651,6 +733,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco } var unready []ComposedResource + var unsynced []ComposedResource for i, cd := range res.Composed { // Specifying a name for P&T templates is optional but encouraged. // If there was no name, fall back to using the index. @@ -659,82 +742,129 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco id = strconv.Itoa(i) } + if !cd.Synced { + log.Debug("Composed resource is not yet valid", "id", id) + unsynced = append(unsynced, cd) + r.record.Event(xr, event.Normal(reasonCompose, fmt.Sprintf("Composed resource %q is not yet valid", id))) + } + if !cd.Ready { log.Debug("Composed resource is not yet ready", "id", id) unready = append(unready, cd) r.record.Event(xr, event.Normal(reasonCompose, fmt.Sprintf("Composed resource %q is not yet ready", id))) - continue } } - xr.SetConditions(xpv1.ReconcileSuccess()) - - // TODO(muvaf): If a resource becomes Unavailable at some point, should we - // still report it as Creating? 
- if len(unready) > 0 { - // We want to requeue to wait for our composed resources to - // become ready, since we can't watch them. - names := make([]string, len(unready)) - for i, cd := range unready { - names[i] = string(cd.ResourceName) - } - // sort for stable condition messages. With functions, we don't have a - // stable order otherwise. - xr.SetConditions(xpv1.Creating().WithMessage(fmt.Sprintf("Unready resources: %s", resource.StableNAndSomeMore(resource.DefaultFirstN, names)))) + if updateXRConditions(xr, unsynced, unready) { + // This requeue is subject to rate limiting. Requeues will exponentially + // backoff from 1 to 30 seconds. See the 'definition' (XRD) reconciler + // that sets up the ratelimiter. return reconcile.Result{Requeue: true}, errors.Wrap(r.client.Status().Update(ctx, xr), errUpdateStatus) } // We requeue after our poll interval because we can't watch composed // resources - we can't know what type of resources we might compose // when this controller is started. - xr.SetConditions(xpv1.Available()) - return reconcile.Result{RequeueAfter: r.pollInterval}, errors.Wrap(r.client.Status().Update(ctx, xr), errUpdateStatus) -} - -// EnqueueForCompositionRevisionFunc returns a function that enqueues (the -// related) XRs when a new CompositionRevision is created. This speeds up -// reconciliation of XRs on changes to the Composition by not having to wait for -// the 60s sync period, but be instant. 
-func EnqueueForCompositionRevisionFunc(of resource.CompositeKind, list func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error, log logging.Logger) func(ctx context.Context, createEvent runtimeevent.CreateEvent, q workqueue.RateLimitingInterface) { - return func(ctx context.Context, createEvent runtimeevent.CreateEvent, q workqueue.RateLimitingInterface) { - rev, ok := createEvent.Object.(*v1.CompositionRevision) - if !ok { - // should not happen - return + return reconcile.Result{RequeueAfter: r.pollInterval(ctx, xr)}, errors.Wrap(r.client.Status().Update(ctx, xr), errUpdateStatus) +} + +// updateXRConditions updates the conditions of the supplied composite resource +// based on the supplied composed resources. It returns true if the XR should be +// requeued immediately. +func updateXRConditions(xr *composite.Unstructured, unsynced, unready []ComposedResource) (requeueImmediately bool) { + readyCond := xpv1.Available() + syncedCond := xpv1.ReconcileSuccess() + if len(unsynced) > 0 { + // We want to requeue to wait for our composed resources to + // become ready, since we can't watch them. + syncedCond = xpv1.ReconcileError(errors.New(errSyncResources)).WithMessage(fmt.Sprintf("Invalid resources: %s", resource.StableNAndSomeMore(resource.DefaultFirstN, getComposerResourcesNames(unsynced)))) + requeueImmediately = true + } + if len(unready) > 0 { + // We want to requeue to wait for our composed resources to + // become ready, since we can't watch them. 
+ readyCond = xpv1.Creating().WithMessage(fmt.Sprintf("Unready resources: %s", resource.StableNAndSomeMore(resource.DefaultFirstN, getComposerResourcesNames(unready)))) + requeueImmediately = true + } + xr.SetConditions(syncedCond, readyCond) + return requeueImmediately +} + +func getComposerResourcesNames(cds []ComposedResource) []string { + names := make([]string, len(cds)) + for i, cd := range cds { + names[i] = string(cd.ResourceName) + } + return names +} + +type compositionResultMeta struct { + numWarningEvents int + conditionTypesSeen map[xpv1.ConditionType]bool +} + +func (r *Reconciler) handleCommonCompositionResult(ctx context.Context, res CompositionResult, xr *composite.Unstructured) compositionResultMeta { + log := r.log.WithValues( + "uid", xr.GetUID(), + "version", xr.GetResourceVersion(), + "name", xr.GetName(), + ) + + cm, err := getClaimFromXR(ctx, r.client, xr) + if err != nil { + log.Debug(errGetClaim, "error", err) + } + + numWarningEvents := 0 + for _, e := range res.Events { + if e.Event.Type == event.TypeWarning { + numWarningEvents++ } - // get all XRs - xrs := kunstructured.UnstructuredList{} - xrs.SetGroupVersionKind(schema.GroupVersionKind(of)) - xrs.SetKind(schema.GroupVersionKind(of).Kind + "List") - if err := list(ctx, &xrs); err != nil { - // logging is most we can do here. This is a programming error if it happens. 
- log.Info("cannot list in CompositionRevision handler", "type", schema.GroupVersionKind(of).String(), "error", err) - return + detailedEvent := e.AsDetailedEvent() + log.Debug(detailedEvent.Message) + r.record.Event(xr, detailedEvent) + + if e.Target == CompositionTargetCompositeAndClaim && cm != nil { + r.record.Event(cm, e.AsEvent()) } + } - // enqueue all those that reference the Composition of this revision - compName := rev.Labels[v1.LabelCompositionName] - if compName == "" { - return + conditionTypesSeen := make(map[xpv1.ConditionType]bool) + for _, c := range res.Conditions { + if xpv1.IsSystemConditionType(c.Condition.Type) { + // Do not let users update system conditions. + continue + } + conditionTypesSeen[c.Condition.Type] = true + xr.SetConditions(c.Condition) + if c.Target == CompositionTargetCompositeAndClaim { + // We can ignore the error as it only occurs if given a system condition. + _ = xr.SetClaimConditionTypes(c.Condition.Type) } - for _, u := range xrs.Items { - xr := composite.Unstructured{Unstructured: u} + } - // only automatic - if pol := xr.GetCompositionUpdatePolicy(); pol != nil && *pol == xpv1.UpdateManual { - continue - } + return compositionResultMeta{ + numWarningEvents: numWarningEvents, + conditionTypesSeen: conditionTypesSeen, + } +} - // only those that reference the right Composition - if ref := xr.GetCompositionReference(); ref == nil || ref.Name != compName { - continue - } +func getClaimFromXR(ctx context.Context, c client.Client, xr *composite.Unstructured) (*claim.Unstructured, error) { + if xr.GetClaimReference() == nil { + return nil, nil + } - q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ - Name: xr.GetName(), - Namespace: xr.GetNamespace(), - }}) - } + gv, err := schema.ParseGroupVersion(xr.GetClaimReference().APIVersion) + if err != nil { + return nil, errors.Wrap(err, errParseClaimRef) + } + + claimGVK := gv.WithKind(xr.GetClaimReference().Kind) + cm := 
claim.New(claim.WithGroupVersionKind(claimGVK)) + claimNN := types.NamespacedName{Namespace: xr.GetClaimReference().Namespace, Name: xr.GetClaimReference().Name} + if err := c.Get(ctx, claimNN, cm); err != nil { + return nil, errors.Wrap(err, errGetClaim) } + return cm, nil } diff --git a/internal/controller/apiextensions/composite/reconciler_test.go b/internal/controller/apiextensions/composite/reconciler_test.go index 76f8b74ba..4fa0dce53 100644 --- a/internal/controller/apiextensions/composite/reconciler_test.go +++ b/internal/controller/apiextensions/composite/reconciler_test.go @@ -18,7 +18,7 @@ package composite import ( "context" - "reflect" + "fmt" "testing" "time" @@ -27,39 +27,36 @@ import ( corev1 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/client" - runtimeevent "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane-runtime/pkg/event" - "github.com/crossplane/crossplane-runtime/pkg/logging" "github.com/crossplane/crossplane-runtime/pkg/meta" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" - "github.com/crossplane/crossplane-runtime/pkg/resource/fake" + "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/claim" + "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composed" "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" "github.com/crossplane/crossplane-runtime/pkg/test" v1 
"github.com/crossplane/crossplane/apis/apiextensions/v1" + "github.com/crossplane/crossplane/internal/engine" ) -var _ Composer = ComposerSelectorFn(func(cm *v1.CompositionMode) Composer { return nil }) +var _ Composer = ComposerSelectorFn(func(_ *v1.CompositionMode) Composer { return nil }) func TestReconcile(t *testing.T) { errBoom := errors.New("boom") cd := managed.ConnectionDetails{"a": []byte("b")} type args struct { - mgr manager.Manager - of resource.CompositeKind - opts []ReconcilerOption + client client.Client + of resource.CompositeKind + opts []ReconcilerOption } type want struct { r reconcile.Result @@ -76,11 +73,8 @@ func TestReconcile(t *testing.T) { "CompositeResourceNotFound": { reason: "We should not return an error if the composite resource was not found.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{}, "")), - }), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{}, "")), }, }, want: want{ @@ -90,11 +84,8 @@ func TestReconcile(t *testing.T) { "GetCompositeResourceError": { reason: "We should return error encountered while getting the composite resource.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(errBoom), - }), + client: &test.MockClient{ + MockGet: test.NewMockGetFn(errBoom), }, }, want: want{ @@ -104,20 +95,19 @@ func TestReconcile(t *testing.T) { "UnpublishConnectionError": { reason: "We should return any error encountered while unpublishing connection details.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetDeletionTimestamp(&now) + })), + MockStatusUpdate: WantComposite(t, NewComposite(func(want resource.Composite) { + want.SetDeletionTimestamp(&now) + want.SetConditions(xpv1.Deleting(), 
xpv1.ReconcileError(errors.Wrap(errBoom, errUnpublish))) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetDeletionTimestamp(&now) - })), - MockStatusUpdate: WantComposite(t, NewComposite(func(want resource.Composite) { - want.SetDeletionTimestamp(&now) - want.SetConditions(xpv1.Deleting(), xpv1.ReconcileError(errors.Wrap(errBoom, errUnpublish))) - })), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithConnectionPublishers(managed.ConnectionPublisherFns{ - UnpublishConnectionFn: func(ctx context.Context, o resource.ConnectionSecretOwner, c managed.ConnectionDetails) error { + UnpublishConnectionFn: func(_ context.Context, _ resource.ConnectionSecretOwner, _ managed.ConnectionDetails) error { return errBoom }, }), @@ -130,24 +120,23 @@ func TestReconcile(t *testing.T) { "RemoveFinalizerError": { reason: "We should return any error encountered while removing finalizer.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetDeletionTimestamp(&now) + })), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetDeletionTimestamp(&now) + cr.SetConditions(xpv1.Deleting(), xpv1.ReconcileError(errors.Wrap(errBoom, errRemoveFinalizer))) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetDeletionTimestamp(&now) - })), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetDeletionTimestamp(&now) - cr.SetConditions(xpv1.Deleting(), xpv1.ReconcileError(errors.Wrap(errBoom, errRemoveFinalizer))) - })), - }), WithCompositeFinalizer(resource.FinalizerFns{ - RemoveFinalizerFn: func(ctx context.Context, obj resource.Object) error { + RemoveFinalizerFn: func(_ context.Context, _ resource.Object) error { return errBoom }, }), 
WithConnectionPublishers(managed.ConnectionPublisherFns{ - UnpublishConnectionFn: func(ctx context.Context, o resource.ConnectionSecretOwner, c managed.ConnectionDetails) error { + UnpublishConnectionFn: func(_ context.Context, _ resource.ConnectionSecretOwner, _ managed.ConnectionDetails) error { return nil }, }), @@ -160,24 +149,23 @@ func TestReconcile(t *testing.T) { "SuccessfulDelete": { reason: "We should return no error when deleted successfully.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetDeletionTimestamp(&now) + })), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetDeletionTimestamp(&now) + cr.SetConditions(xpv1.Deleting(), xpv1.ReconcileSuccess()) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetDeletionTimestamp(&now) - })), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetDeletionTimestamp(&now) - cr.SetConditions(xpv1.Deleting(), xpv1.ReconcileSuccess()) - })), - }), WithCompositeFinalizer(resource.FinalizerFns{ - RemoveFinalizerFn: func(ctx context.Context, obj resource.Object) error { + RemoveFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }, }), WithConnectionPublishers(managed.ConnectionPublisherFns{ - UnpublishConnectionFn: func(ctx context.Context, o resource.ConnectionSecretOwner, c managed.ConnectionDetails) error { + UnpublishConnectionFn: func(_ context.Context, _ resource.ConnectionSecretOwner, _ managed.ConnectionDetails) error { return nil }, }), @@ -190,16 +178,15 @@ func TestReconcile(t *testing.T) { "AddFinalizerError": { reason: "We should return any error encountered while adding finalizer.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: WantComposite(t, 
NewComposite(func(cr resource.Composite) { + cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errAddFinalizer))) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errAddFinalizer))) - })), - }), WithCompositeFinalizer(resource.FinalizerFns{ - AddFinalizerFn: func(ctx context.Context, obj resource.Object) error { + AddFinalizerFn: func(_ context.Context, _ resource.Object) error { return errBoom }, }), @@ -212,14 +199,13 @@ func TestReconcile(t *testing.T) { "SelectCompositionError": { reason: "We should return any error encountered while selecting a composition.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errSelectComp))) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errSelectComp))) - })), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, _ resource.Composite) error { return errBoom @@ -233,15 +219,14 @@ func TestReconcile(t *testing.T) { "FetchCompositionError": { reason: "We should return any error encountered while fetching a composition.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetCompositionReference(&corev1.ObjectReference{}) + cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errFetchComp))) + })), + }, opts: []ReconcilerOption{ - 
WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetCompositionReference(&corev1.ObjectReference{}) - cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errFetchComp))) - })), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { cr.SetCompositionReference(&corev1.ObjectReference{}) @@ -259,15 +244,14 @@ func TestReconcile(t *testing.T) { "ValidateCompositionError": { reason: "We should return any error encountered while validating our Composition.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetCompositionReference(&corev1.ObjectReference{}) + cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errValidate))) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetCompositionReference(&corev1.ObjectReference{}) - cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errValidate))) - })), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { cr.SetCompositionReference(&corev1.ObjectReference{}) @@ -288,15 +272,14 @@ func TestReconcile(t *testing.T) { "ConfigureCompositeError": { reason: "We should return any error encountered while configuring the composite resource.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetCompositionReference(&corev1.ObjectReference{}) + cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, 
errConfigure))) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetCompositionReference(&corev1.ObjectReference{}) - cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errConfigure))) - })), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { cr.SetCompositionReference(&corev1.ObjectReference{}) @@ -318,14 +301,13 @@ func TestReconcile(t *testing.T) { "SelectEnvironmentError": { reason: "We should return any error encountered while selecting the environment.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), - WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { + WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, _ resource.Composite) error { return nil })), WithCompositionRevisionFetcher(CompositionRevisionFetcherFn(func(_ context.Context, _ resource.Composite) (*v1.CompositionRevision, error) { @@ -333,8 +315,8 @@ func TestReconcile(t *testing.T) { return c, nil })), WithCompositionRevisionValidator(CompositionRevisionValidatorFn(func(_ *v1.CompositionRevision) error { return nil })), - WithConfigurator(ConfiguratorFn(func(ctx context.Context, cr resource.Composite, rev *v1.CompositionRevision) error { return nil })), - WithEnvironmentSelector(EnvironmentSelectorFn(func(ctx context.Context, cr resource.Composite, rev *v1.CompositionRevision) error { + WithConfigurator(ConfiguratorFn(func(_ context.Context, _ resource.Composite, _ 
*v1.CompositionRevision) error { return nil })), + WithEnvironmentSelector(EnvironmentSelectorFn(func(_ context.Context, _ resource.Composite, _ *v1.CompositionRevision) error { return errBoom })), }, @@ -346,14 +328,13 @@ func TestReconcile(t *testing.T) { "FetchEnvironmentError": { reason: "We should requeue on any error encountered while fetching the environment.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), - WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { + WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, _ resource.Composite) error { return nil })), WithCompositionRevisionFetcher(CompositionRevisionFetcherFn(func(_ context.Context, _ resource.Composite) (*v1.CompositionRevision, error) { @@ -361,8 +342,8 @@ func TestReconcile(t *testing.T) { return c, nil })), WithCompositionRevisionValidator(CompositionRevisionValidatorFn(func(_ *v1.CompositionRevision) error { return nil })), - WithConfigurator(ConfiguratorFn(func(ctx context.Context, cr resource.Composite, rev *v1.CompositionRevision) error { return nil })), - WithEnvironmentFetcher(EnvironmentFetcherFn(func(ctx context.Context, req EnvironmentFetcherRequest) (*Environment, error) { + WithConfigurator(ConfiguratorFn(func(_ context.Context, _ resource.Composite, _ *v1.CompositionRevision) error { return nil })), + WithEnvironmentFetcher(EnvironmentFetcherFn(func(_ context.Context, _ EnvironmentFetcherRequest) (*Environment, error) { return nil, errBoom })), }, @@ -374,15 +355,14 @@ func TestReconcile(t *testing.T) { "ComposeResourcesError": { reason: "We should return any error encountered while composing 
resources.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetCompositionReference(&corev1.ObjectReference{}) + cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errCompose))) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetCompositionReference(&corev1.ObjectReference{}) - cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errCompose))) - })), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { cr.SetCompositionReference(&corev1.ObjectReference{}) @@ -395,7 +375,7 @@ func TestReconcile(t *testing.T) { WithConfigurator(ConfiguratorFn(func(_ context.Context, _ resource.Composite, _ *v1.CompositionRevision) error { return nil })), - WithComposer(ComposerFn(func(ctx context.Context, xr *composite.Unstructured, req CompositionRequest) (CompositionResult, error) { + WithComposer(ComposerFn(func(_ context.Context, _ *composite.Unstructured, _ CompositionRequest) (CompositionResult, error) { return CompositionResult{}, errBoom })), }, @@ -407,15 +387,14 @@ func TestReconcile(t *testing.T) { "PublishConnectionDetailsError": { reason: "We should return any error encountered while publishing connection details.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetCompositionReference(&corev1.ObjectReference{}) + cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errPublish))) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr 
resource.Composite) { - cr.SetCompositionReference(&corev1.ObjectReference{}) - cr.SetConditions(xpv1.ReconcileError(errors.Wrap(errBoom, errPublish))) - })), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { cr.SetCompositionReference(&corev1.ObjectReference{}) @@ -428,11 +407,11 @@ func TestReconcile(t *testing.T) { WithConfigurator(ConfiguratorFn(func(_ context.Context, _ resource.Composite, _ *v1.CompositionRevision) error { return nil })), - WithComposer(ComposerFn(func(ctx context.Context, xr *composite.Unstructured, req CompositionRequest) (CompositionResult, error) { + WithComposer(ComposerFn(func(_ context.Context, _ *composite.Unstructured, _ CompositionRequest) (CompositionResult, error) { return CompositionResult{}, nil })), WithConnectionPublishers(managed.ConnectionPublisherFns{ - PublishConnectionFn: func(ctx context.Context, o resource.ConnectionSecretOwner, c managed.ConnectionDetails) (published bool, err error) { + PublishConnectionFn: func(_ context.Context, _ resource.ConnectionSecretOwner, _ managed.ConnectionDetails) (published bool, err error) { return false, errBoom }, }), @@ -445,15 +424,14 @@ func TestReconcile(t *testing.T) { "CompositionWarnings": { reason: "We should not requeue if our Composer returned warning events.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: WantComposite(t, NewComposite(func(xr resource.Composite) { + xr.SetCompositionReference(&corev1.ObjectReference{}) + xr.SetConditions(xpv1.ReconcileSuccess(), xpv1.Available()) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetCompositionReference(&corev1.ObjectReference{}) - cr.SetConditions(xpv1.ReconcileSuccess(), xpv1.Available()) - })), - }), 
WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { cr.SetCompositionReference(&corev1.ObjectReference{}) @@ -469,13 +447,18 @@ func TestReconcile(t *testing.T) { WithConfigurator(ConfiguratorFn(func(_ context.Context, _ resource.Composite, _ *v1.CompositionRevision) error { return nil })), - WithComposer(ComposerFn(func(ctx context.Context, xr *composite.Unstructured, req CompositionRequest) (CompositionResult, error) { + WithComposer(ComposerFn(func(_ context.Context, _ *composite.Unstructured, _ CompositionRequest) (CompositionResult, error) { return CompositionResult{ - Events: []event.Event{event.Warning("Warning", errBoom)}, + Events: []TargetedEvent{ + { + Event: event.Warning("Warning", errBoom), + Target: CompositionTargetComposite, + }, + }, }, nil })), WithConnectionPublishers(managed.ConnectionPublisherFns{ - PublishConnectionFn: func(ctx context.Context, o resource.ConnectionSecretOwner, c managed.ConnectionDetails) (published bool, err error) { + PublishConnectionFn: func(_ context.Context, _ resource.ConnectionSecretOwner, _ managed.ConnectionDetails) (published bool, err error) { return false, nil }, }), @@ -488,15 +471,14 @@ func TestReconcile(t *testing.T) { "ComposedResourcesNotReady": { reason: "We should requeue if any of our composed resources are not yet ready.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetCompositionReference(&corev1.ObjectReference{}) + cr.SetConditions(xpv1.ReconcileSuccess(), xpv1.Creating().WithMessage("Unready resources: cat, cow, elephant, and 1 more")) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - 
cr.SetCompositionReference(&corev1.ObjectReference{}) - cr.SetConditions(xpv1.ReconcileSuccess(), xpv1.Creating().WithMessage("Unready resources: cat, cow, elephant, and 1 more")) - })), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { cr.SetCompositionReference(&corev1.ObjectReference{}) @@ -512,31 +494,37 @@ func TestReconcile(t *testing.T) { WithConfigurator(ConfiguratorFn(func(_ context.Context, _ resource.Composite, _ *v1.CompositionRevision) error { return nil })), - WithComposer(ComposerFn(func(ctx context.Context, xr *composite.Unstructured, req CompositionRequest) (CompositionResult, error) { + WithComposer(ComposerFn(func(_ context.Context, _ *composite.Unstructured, _ CompositionRequest) (CompositionResult, error) { return CompositionResult{ Composed: []ComposedResource{{ ResourceName: "elephant", Ready: false, + Synced: true, }, { ResourceName: "cow", Ready: false, + Synced: true, }, { ResourceName: "pig", Ready: true, + Synced: true, }, { ResourceName: "cat", Ready: false, + Synced: true, }, { ResourceName: "dog", Ready: true, + Synced: true, }, { ResourceName: "snake", Ready: false, + Synced: true, }}, }, nil })), WithConnectionPublishers(managed.ConnectionPublisherFns{ - PublishConnectionFn: func(ctx context.Context, o resource.ConnectionSecretOwner, c managed.ConnectionDetails) (published bool, err error) { + PublishConnectionFn: func(_ context.Context, _ resource.ConnectionSecretOwner, _ managed.ConnectionDetails) (published bool, err error) { return false, nil }, }), @@ -549,16 +537,24 @@ func TestReconcile(t *testing.T) { "ComposedResourcesReady": { reason: "We should requeue after our poll interval if all of our composed resources are ready.", args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetResourceReferences([]corev1.ObjectReference{{ + 
APIVersion: "example.org/v1", + Kind: "ComposedResource", + }}) + })), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetCompositionReference(&corev1.ObjectReference{}) + cr.SetResourceReferences([]corev1.ObjectReference{{ + APIVersion: "example.org/v1", + Kind: "ComposedResource", + }}) + cr.SetConditions(xpv1.ReconcileSuccess(), xpv1.Available()) + cr.SetConnectionDetailsLastPublishedTime(&now) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetCompositionReference(&corev1.ObjectReference{}) - cr.SetConditions(xpv1.ReconcileSuccess(), xpv1.Available()) - cr.SetConnectionDetailsLastPublishedTime(&now) - })), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { cr.SetCompositionReference(&corev1.ObjectReference{}) @@ -574,11 +570,11 @@ func TestReconcile(t *testing.T) { WithConfigurator(ConfiguratorFn(func(_ context.Context, _ resource.Composite, _ *v1.CompositionRevision) error { return nil })), - WithComposer(ComposerFn(func(ctx context.Context, xr *composite.Unstructured, req CompositionRequest) (CompositionResult, error) { + WithComposer(ComposerFn(func(_ context.Context, _ *composite.Unstructured, _ CompositionRequest) (CompositionResult, error) { return CompositionResult{ConnectionDetails: cd}, nil })), WithConnectionPublishers(managed.ConnectionPublisherFns{ - PublishConnectionFn: func(ctx context.Context, o resource.ConnectionSecretOwner, got managed.ConnectionDetails) (published bool, err error) { + PublishConnectionFn: func(_ context.Context, _ resource.ConnectionSecretOwner, got managed.ConnectionDetails) (published bool, err error) { want := cd if diff := cmp.Diff(want, got); diff != "" { t.Errorf("PublishConnection(...): -want, +got:\n%s", diff) @@ -586,6 +582,19 @@ func 
TestReconcile(t *testing.T) { return true, nil }, }), + WithWatchStarter("cool-controller", nil, WatchStarterFn(func(_ string, ws ...engine.Watch) error { + cd := composed.New(composed.FromReference(corev1.ObjectReference{ + APIVersion: "example.org/v1", + Kind: "ComposedResource", + })) + want := []engine.Watch{engine.WatchFor(cd, engine.WatchTypeComposedResource, nil)} + + if diff := cmp.Diff(want, ws, cmp.AllowUnexported(engine.Watch{})); diff != "" { + t.Errorf("StartWatches(...): -want, +got:\n%s", diff) + } + + return nil + })), }, }, want: want{ @@ -595,17 +604,14 @@ func TestReconcile(t *testing.T) { "ReconciliationPausedSuccessful": { reason: `If a composite resource has the pause annotation with value "true", there should be no further requeue requests.`, args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: "true"}) - })), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: "true"}) - cr.SetConditions(xpv1.ReconcilePaused().WithMessage(reconcilePausedMsg)) - })), - }), + client: &test.MockClient{ + MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: "true"}) + })), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: "true"}) + cr.SetConditions(xpv1.ReconcilePaused().WithMessage(reconcilePausedMsg)) + })), }, }, want: want{ @@ -615,14 +621,11 @@ func TestReconcile(t *testing.T) { "ReconciliationPausedError": { reason: `If a composite resource has the pause annotation with value "true" and the status update due to reconciliation being paused fails, error should be 
reported causing an exponentially backed-off requeue.`, args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: "true"}) - })), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(errBoom), - }), + client: &test.MockClient{ + MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: "true"}) + })), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(errBoom), }, }, want: want{ @@ -632,20 +635,19 @@ func TestReconcile(t *testing.T) { "ReconciliationResumes": { reason: `If a composite resource has the pause annotation with some value other than "true" and the Synced=False/ReconcilePaused status condition, reconciliation should resume with requeueing.`, args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: ""}) + cr.SetConditions(xpv1.ReconcilePaused()) + })), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: ""}) + cr.SetConditions(xpv1.ReconcileSuccess(), xpv1.Available()) + cr.SetConnectionDetailsLastPublishedTime(&now) + cr.SetCompositionReference(&corev1.ObjectReference{}) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: ""}) - cr.SetConditions(xpv1.ReconcilePaused()) - })), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetAnnotations(map[string]string{meta.AnnotationKeyReconciliationPaused: ""}) - 
cr.SetConditions(xpv1.ReconcileSuccess(), xpv1.Available()) - cr.SetConnectionDetailsLastPublishedTime(&now) - cr.SetCompositionReference(&corev1.ObjectReference{}) - })), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { cr.SetCompositionReference(&corev1.ObjectReference{}) @@ -661,11 +663,11 @@ func TestReconcile(t *testing.T) { WithConfigurator(ConfiguratorFn(func(_ context.Context, _ resource.Composite, _ *v1.CompositionRevision) error { return nil })), - WithComposer(ComposerFn(func(ctx context.Context, xr *composite.Unstructured, req CompositionRequest) (CompositionResult, error) { + WithComposer(ComposerFn(func(_ context.Context, _ *composite.Unstructured, _ CompositionRequest) (CompositionResult, error) { return CompositionResult{}, nil })), WithConnectionPublishers(managed.ConnectionPublisherFns{ - PublishConnectionFn: func(ctx context.Context, o resource.ConnectionSecretOwner, got managed.ConnectionDetails) (published bool, err error) { + PublishConnectionFn: func(_ context.Context, _ resource.ConnectionSecretOwner, _ managed.ConnectionDetails) (published bool, err error) { return true, nil }, }), @@ -678,20 +680,19 @@ func TestReconcile(t *testing.T) { "ReconciliationResumesAfterAnnotationRemoval": { reason: `If a composite resource has the pause annotation removed and the Synced=False/ReconcilePaused status condition, reconciliation should resume with requeueing.`, args: args{ - mgr: &fake.Manager{}, + client: &test.MockClient{ + MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { + // no annotation atm + // (but reconciliations were already paused) + cr.SetConditions(xpv1.ReconcilePaused()) + })), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetConditions(xpv1.ReconcileSuccess(), xpv1.Available()) + cr.SetConnectionDetailsLastPublishedTime(&now) + 
cr.SetCompositionReference(&corev1.ObjectReference{}) + })), + }, opts: []ReconcilerOption{ - WithClient(&test.MockClient{ - MockGet: WithComposite(t, NewComposite(func(cr resource.Composite) { - // no annotation atm - // (but reconciliations were already paused) - cr.SetConditions(xpv1.ReconcilePaused()) - })), - MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { - cr.SetConditions(xpv1.ReconcileSuccess(), xpv1.Available()) - cr.SetConnectionDetailsLastPublishedTime(&now) - cr.SetCompositionReference(&corev1.ObjectReference{}) - })), - }), WithCompositeFinalizer(resource.NewNopFinalizer()), WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { cr.SetCompositionReference(&corev1.ObjectReference{}) @@ -707,11 +708,11 @@ func TestReconcile(t *testing.T) { WithConfigurator(ConfiguratorFn(func(_ context.Context, _ resource.Composite, _ *v1.CompositionRevision) error { return nil })), - WithComposer(ComposerFn(func(ctx context.Context, xr *composite.Unstructured, req CompositionRequest) (CompositionResult, error) { + WithComposer(ComposerFn(func(_ context.Context, _ *composite.Unstructured, _ CompositionRequest) (CompositionResult, error) { return CompositionResult{}, nil })), WithConnectionPublishers(managed.ConnectionPublisherFns{ - PublishConnectionFn: func(ctx context.Context, o resource.ConnectionSecretOwner, got managed.ConnectionDetails) (published bool, err error) { + PublishConnectionFn: func(_ context.Context, _ resource.ConnectionSecretOwner, _ managed.ConnectionDetails) (published bool, err error) { return true, nil }, }), @@ -721,11 +722,593 @@ func TestReconcile(t *testing.T) { r: reconcile.Result{RequeueAfter: defaultPollInterval}, }, }, + "CustomEventsAndConditions": { + reason: "We should emit custom events and set custom conditions that were returned by the composer on both the composite resource and the claim.", + args: args{ + client: &test.MockClient{ + MockGet: 
test.NewMockGetFn(nil, func(obj client.Object) error { + if xr, ok := obj.(*composite.Unstructured); ok { + // non-nil claim ref to trigger claim Get() + xr.SetClaimReference(&claim.Reference{}) + return nil + } + if cm, ok := obj.(*claim.Unstructured); ok { + claim.New(claim.WithGroupVersionKind(schema.GroupVersionKind{})).DeepCopyInto(cm) + return nil + } + return nil + }), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetCompositionReference(&corev1.ObjectReference{}) + cr.SetConditions( + xpv1.Condition{ + Type: "DatabaseReady", + Status: corev1.ConditionTrue, + Reason: "Available", + Message: "This is a condition for database availability.", + }, + xpv1.Condition{ + Type: "InternalSync", + Status: corev1.ConditionTrue, + LastTransitionTime: metav1.Time{}, + Reason: "SyncSuccess", + Message: "This is a condition representing an internal sync process.", + ObservedGeneration: 0, + }, + xpv1.ReconcileSuccess(), + xpv1.Available(), + ) + cr.(*composite.Unstructured).SetClaimConditionTypes("DatabaseReady") + cr.SetClaimReference(&claim.Reference{}) + })), + }, + opts: []ReconcilerOption{ + WithRecorder(newTestRecorder( + eventArgs{ + Kind: compositeKind, + Event: event.Event{ + Type: event.Type(corev1.EventTypeNormal), + Reason: "SelectComposition", + Message: "Successfully selected composition: ", + Annotations: map[string]string{}, + }, + }, + eventArgs{ + Kind: compositeKind, + Event: event.Event{ + Type: event.TypeNormal, + Reason: "DatabaseAvailable", + // The composite should have the "Pipeline step" prefix. + Message: "Pipeline step \"some-function\": This is an event for database availability.", + Annotations: map[string]string{}, + }, + }, + eventArgs{ + Kind: claimKind, + Event: event.Event{ + Type: event.TypeNormal, + Reason: "DatabaseAvailable", + // The claim should not have the "Pipeline step" prefix. 
+ Message: "This is an event for database availability.", + Annotations: map[string]string{}, + }, + }, + eventArgs{ + Kind: compositeKind, + Event: event.Event{ + Type: event.TypeNormal, + Reason: "SyncSuccess", + Message: "Pipeline step \"some-function\": Internal sync was successful.", + Annotations: map[string]string{}, + }, + }, + eventArgs{ + Kind: compositeKind, + Event: event.Event{ + Type: event.Type(corev1.EventTypeNormal), + Reason: "ComposeResources", + Message: "Successfully composed resources", + Annotations: map[string]string{}, + }, + }, + )), + WithCompositeFinalizer(resource.NewNopFinalizer()), + WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { + cr.SetCompositionReference(&corev1.ObjectReference{}) + return nil + })), + WithCompositionRevisionFetcher(CompositionRevisionFetcherFn(func(_ context.Context, _ resource.Composite) (*v1.CompositionRevision, error) { + return &v1.CompositionRevision{}, nil + })), + WithCompositionRevisionValidator(CompositionRevisionValidatorFn(func(_ *v1.CompositionRevision) error { return nil })), + WithConfigurator(ConfiguratorFn(func(_ context.Context, _ resource.Composite, _ *v1.CompositionRevision) error { + return nil + })), + WithComposer(ComposerFn(func(_ context.Context, _ *composite.Unstructured, _ CompositionRequest) (CompositionResult, error) { + return CompositionResult{ + Composed: []ComposedResource{}, + ConnectionDetails: cd, + Events: []TargetedEvent{ + { + Event: event.Event{ + Type: event.TypeNormal, + Reason: "DatabaseAvailable", + Message: "This is an event for database availability.", + Annotations: map[string]string{}, + }, + Detail: "Pipeline step \"some-function\"", + Target: CompositionTargetCompositeAndClaim, + }, + { + Event: event.Event{ + Type: event.TypeNormal, + Reason: "SyncSuccess", + Message: "Internal sync was successful.", + Annotations: map[string]string{}, + }, + Detail: "Pipeline step \"some-function\"", + Target: 
CompositionTargetComposite, + }, + }, + Conditions: []TargetedCondition{ + { + Condition: xpv1.Condition{ + Type: "DatabaseReady", + Status: corev1.ConditionTrue, + Reason: "Available", + Message: "This is a condition for database availability.", + }, + Target: CompositionTargetCompositeAndClaim, + }, + { + Condition: xpv1.Condition{ + Type: "InternalSync", + Status: corev1.ConditionTrue, + LastTransitionTime: metav1.Time{}, + Reason: "SyncSuccess", + Message: "This is a condition representing an internal sync process.", + ObservedGeneration: 0, + }, + Target: CompositionTargetComposite, + }, + }, + }, nil + })), + }, + }, + want: want{ + r: reconcile.Result{RequeueAfter: defaultPollInterval}, + }, + }, + "CustomEventsAndConditionFatal": { + reason: "In the case of a fatal result from the composer, we should set all custom conditions that were seen. If any custom conditions were not seen, they should be marked as Unknown. The error message should be emitted as an event to the composite but not the claim.", + args: args{ + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + if xr, ok := obj.(*composite.Unstructured); ok { + // non-nil claim ref to trigger claim Get() + xr.SetClaimReference(&claim.Reference{}) + xr.SetConditions(xpv1.Condition{ + Type: "DatabaseReady", + Status: corev1.ConditionTrue, + Reason: "Available", + Message: "This is a condition for database availability.", + }) + xr.SetClaimConditionTypes("DatabaseReady") + return nil + } + if cm, ok := obj.(*claim.Unstructured); ok { + claim.New(claim.WithGroupVersionKind(schema.GroupVersionKind{})).DeepCopyInto(cm) + return nil + } + return nil + }), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetCompositionReference(&corev1.ObjectReference{}) + + cr.SetConditions( + xpv1.Condition{ + Type: "DatabaseReady", + Status: corev1.ConditionUnknown, + Reason: "FatalError", + Message: "A fatal error occurred before the status of 
this condition could be determined.", + }, + xpv1.ReconcileError(fmt.Errorf("cannot compose resources: %w", errBoom)), + xpv1.Condition{ + Type: "InternalSync", + Status: corev1.ConditionTrue, + LastTransitionTime: metav1.Time{}, + Reason: "SyncSuccess", + Message: "This is a condition representing an internal sync process.", + ObservedGeneration: 0, + }, + xpv1.Condition{ + Type: "BucketReady", + Status: corev1.ConditionTrue, + LastTransitionTime: metav1.Time{}, + Reason: "Available", + Message: "This is a condition for bucket availability.", + ObservedGeneration: 0, + }, + ) + + cr.(*composite.Unstructured).SetClaimConditionTypes( + "DatabaseReady", + "BucketReady", + ) + cr.SetClaimReference(&claim.Reference{}) + })), + }, + opts: []ReconcilerOption{ + WithRecorder(newTestRecorder( + eventArgs{ + Kind: compositeKind, + Event: event.Event{ + Type: event.Type(corev1.EventTypeNormal), + Reason: "SelectComposition", + Message: "Successfully selected composition: ", + Annotations: map[string]string{}, + }, + }, + eventArgs{ + Kind: compositeKind, + Event: event.Warning("ComposeResources", fmt.Errorf("cannot compose resources: %w", errBoom)), + }, + eventArgs{ + Kind: compositeKind, + Event: event.Event{ + Type: event.TypeNormal, + Reason: "DatabaseAvailable", + Message: "Pipeline step \"some-function\": This is an event for database availability.", + }, + }, + eventArgs{ + Kind: claimKind, + Event: event.Event{ + Type: event.TypeNormal, + Reason: "DatabaseAvailable", + // The claim should not have the "Pipeline step" prefix. + Message: "This is an event for database availability.", + }, + }, + eventArgs{ + Kind: compositeKind, + Event: event.Event{ + Type: event.TypeNormal, + Reason: "SyncSuccess", + // The composite should have the "Pipeline step" prefix. 
+ Message: "Pipeline step \"some-function\": Internal sync was successful.", + Annotations: map[string]string{}, + }, + }, + eventArgs{ + Kind: compositeKind, + Event: event.Event{ + Type: event.TypeNormal, + Reason: "EventNoDetail", + // The composite should not have the prefix as it had an empty + // detail. + Message: "This event should not contain a detail prefix.", + Annotations: map[string]string{}, + }, + }, + )), + WithCompositeFinalizer(resource.NewNopFinalizer()), + WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { + cr.SetCompositionReference(&corev1.ObjectReference{}) + return nil + })), + WithCompositionRevisionFetcher(CompositionRevisionFetcherFn(func(_ context.Context, _ resource.Composite) (*v1.CompositionRevision, error) { + return &v1.CompositionRevision{}, nil + })), + WithCompositionRevisionValidator(CompositionRevisionValidatorFn(func(_ *v1.CompositionRevision) error { return nil })), + WithConfigurator(ConfiguratorFn(func(_ context.Context, _ resource.Composite, _ *v1.CompositionRevision) error { + return nil + })), + WithComposer(ComposerFn(func(_ context.Context, _ *composite.Unstructured, _ CompositionRequest) (CompositionResult, error) { + return CompositionResult{ + Composed: []ComposedResource{}, + ConnectionDetails: cd, + Events: []TargetedEvent{ + { + Event: event.Event{ + Type: event.TypeNormal, + Reason: "DatabaseAvailable", + Message: "This is an event for database availability.", + }, + Detail: "Pipeline step \"some-function\"", + Target: CompositionTargetCompositeAndClaim, + }, + { + Event: event.Event{ + Type: event.TypeNormal, + Reason: "SyncSuccess", + Message: "Internal sync was successful.", + Annotations: map[string]string{}, + }, + Detail: "Pipeline step \"some-function\"", + Target: CompositionTargetComposite, + }, + { + Event: event.Event{ + Type: event.TypeNormal, + Reason: "EventNoDetail", + Message: "This event should not contain a detail prefix.", + Annotations: 
map[string]string{}, + }, + Target: CompositionTargetComposite, + }, + }, + Conditions: []TargetedCondition{ + { + Condition: xpv1.Condition{ + Type: "InternalSync", + Status: corev1.ConditionTrue, + LastTransitionTime: metav1.Time{}, + Reason: "SyncSuccess", + Message: "This is a condition representing an internal sync process.", + ObservedGeneration: 0, + }, + Target: CompositionTargetComposite, + }, + { + Condition: xpv1.Condition{ + Type: "BucketReady", + Status: corev1.ConditionTrue, + LastTransitionTime: metav1.Time{}, + Reason: "Available", + Message: "This is a condition for bucket availability.", + ObservedGeneration: 0, + }, + Target: CompositionTargetCompositeAndClaim, + }, + }, + }, errBoom + })), + }, + }, + want: want{ + r: reconcile.Result{Requeue: true}, + }, + }, + "CustomConditionUpdate": { + reason: "Custom conditions should be updated if they already exist. Additionally, if a condition already exists in the status but was not included in the response, it should remain in the status.", + args: args{ + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + if xr, ok := obj.(*composite.Unstructured); ok { + // non-nil claim ref to trigger claim Get() + xr.SetClaimReference(&claim.Reference{}) + // The database condition already exists on the XR. + xr.SetConditions(xpv1.Condition{ + Type: "DatabaseReady", + Status: corev1.ConditionTrue, + Reason: "Available", + Message: "This is a condition for database availability.", + }) + // The bucket began in a non-ready state. 
+ xr.SetConditions(xpv1.Condition{ + Type: "BucketReady", + Status: corev1.ConditionFalse, + Reason: "Creating", + Message: "Waiting for bucket to be created.", + }) + + xr.SetClaimConditionTypes("DatabaseReady", "BucketReady") + return nil + } + if cm, ok := obj.(*claim.Unstructured); ok { + claim.New(claim.WithGroupVersionKind(schema.GroupVersionKind{})).DeepCopyInto(cm) + return nil + } + return nil + }), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetCompositionReference(&corev1.ObjectReference{}) + cr.SetConditions( + // The database condition should exist even though it was not seen + // during this reconcile. + xpv1.Condition{ + Type: "DatabaseReady", + Status: corev1.ConditionTrue, + Reason: "Available", + Message: "This is a condition for database availability.", + }, + // The bucket condition should be updated to reflect the latest + // condition which is available. + xpv1.Condition{ + Type: "BucketReady", + Status: corev1.ConditionTrue, + LastTransitionTime: metav1.Time{}, + Reason: "Available", + Message: "This is a condition for bucket availability.", + ObservedGeneration: 0, + }, + xpv1.Condition{ + Type: "InternalSync", + Status: corev1.ConditionTrue, + LastTransitionTime: metav1.Time{}, + Reason: "SyncSuccess", + Message: "This is a condition representing an internal sync process.", + ObservedGeneration: 0, + }, + xpv1.ReconcileSuccess(), + xpv1.Available(), + ) + cr.(*composite.Unstructured).SetClaimConditionTypes( + // The database claim condition should exist even though it was + // not seen during this reconcile. 
+ "DatabaseReady", + "BucketReady", + ) + cr.SetClaimReference(&claim.Reference{}) + })), + }, + opts: []ReconcilerOption{ + WithRecorder(newTestRecorder( + eventArgs{ + Kind: compositeKind, + Event: event.Event{ + Type: event.Type(corev1.EventTypeNormal), + Reason: "SelectComposition", + Message: "Successfully selected composition: ", + Annotations: map[string]string{}, + }, + }, + eventArgs{ + Kind: compositeKind, + Event: event.Event{ + Type: event.Type(corev1.EventTypeNormal), + Reason: "ComposeResources", + Message: "Successfully composed resources", + Annotations: map[string]string{}, + }, + }, + )), + WithCompositeFinalizer(resource.NewNopFinalizer()), + WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { + cr.SetCompositionReference(&corev1.ObjectReference{}) + return nil + })), + WithCompositionRevisionFetcher(CompositionRevisionFetcherFn(func(_ context.Context, _ resource.Composite) (*v1.CompositionRevision, error) { + return &v1.CompositionRevision{}, nil + })), + WithCompositionRevisionValidator(CompositionRevisionValidatorFn(func(_ *v1.CompositionRevision) error { return nil })), + WithConfigurator(ConfiguratorFn(func(_ context.Context, _ resource.Composite, _ *v1.CompositionRevision) error { + return nil + })), + WithComposer(ComposerFn(func(_ context.Context, _ *composite.Unstructured, _ CompositionRequest) (CompositionResult, error) { + return CompositionResult{ + Composed: []ComposedResource{}, + ConnectionDetails: cd, + Events: []TargetedEvent{}, + Conditions: []TargetedCondition{ + // The database condition is not added to the XR again. + { + Condition: xpv1.Condition{ + Type: "InternalSync", + Status: corev1.ConditionTrue, + LastTransitionTime: metav1.Time{}, + Reason: "SyncSuccess", + Message: "This is a condition representing an internal sync process.", + ObservedGeneration: 0, + }, + Target: CompositionTargetComposite, + }, + // The bucket is now ready. 
+ { + Condition: xpv1.Condition{ + Type: "BucketReady", + Status: corev1.ConditionTrue, + LastTransitionTime: metav1.Time{}, + Reason: "Available", + Message: "This is a condition for bucket availability.", + ObservedGeneration: 0, + }, + Target: CompositionTargetCompositeAndClaim, + }, + }, + }, nil + })), + }, + }, + want: want{ + r: reconcile.Result{RequeueAfter: defaultPollInterval}, + }, + }, + "CustomEventsFailToGetClaim": { + reason: "We should emit custom events that were returned by the composer. If we cannot get the claim, we should just emit events for the composite and continue as normal.", + args: args{ + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + if xr, ok := obj.(*composite.Unstructured); ok { + // non-nil claim ref to trigger claim Get() + xr.SetClaimReference(&claim.Reference{}) + return nil + } + if _, ok := obj.(*claim.Unstructured); ok { + // something went wrong when getting the claim + return errBoom + } + return nil + }), + MockStatusUpdate: WantComposite(t, NewComposite(func(cr resource.Composite) { + cr.SetCompositionReference(&corev1.ObjectReference{}) + cr.SetConditions(xpv1.ReconcileSuccess(), xpv1.Available()) + cr.SetClaimReference(&claim.Reference{}) + })), + }, + opts: []ReconcilerOption{ + WithRecorder(newTestRecorder( + eventArgs{ + Kind: compositeKind, + Event: event.Event{ + Type: event.Type(corev1.EventTypeNormal), + Reason: "SelectComposition", + Message: "Successfully selected composition: ", + Annotations: map[string]string{}, + }, + }, + eventArgs{ + Kind: compositeKind, + Event: event.Event{ + Type: event.TypeNormal, + Reason: "DatabaseAvailable", + Message: "Pipeline step \"some-function\": This is an event for database availability.", + Annotations: map[string]string{}, + }, + }, + eventArgs{ + Kind: compositeKind, + Event: event.Event{ + Type: event.Type(corev1.EventTypeNormal), + Reason: "ComposeResources", + Message: "Successfully composed resources", + Annotations: 
map[string]string{}, + }, + }, + )), + WithCompositeFinalizer(resource.NewNopFinalizer()), + WithCompositionSelector(CompositionSelectorFn(func(_ context.Context, cr resource.Composite) error { + cr.SetCompositionReference(&corev1.ObjectReference{}) + return nil + })), + WithCompositionRevisionFetcher(CompositionRevisionFetcherFn(func(_ context.Context, _ resource.Composite) (*v1.CompositionRevision, error) { + return &v1.CompositionRevision{}, nil + })), + WithCompositionRevisionValidator(CompositionRevisionValidatorFn(func(_ *v1.CompositionRevision) error { return nil })), + WithConfigurator(ConfiguratorFn(func(_ context.Context, _ resource.Composite, _ *v1.CompositionRevision) error { + return nil + })), + WithComposer(ComposerFn(func(_ context.Context, _ *composite.Unstructured, _ CompositionRequest) (CompositionResult, error) { + return CompositionResult{ + Composed: []ComposedResource{}, + ConnectionDetails: cd, + Events: []TargetedEvent{ + { + Event: event.Event{ + Type: event.TypeNormal, + Reason: "DatabaseAvailable", + Message: "This is an event for database availability.", + Annotations: map[string]string{}, + }, + Detail: "Pipeline step \"some-function\"", + Target: CompositionTargetCompositeAndClaim, + }, + }, + }, nil + })), + }, + }, + want: want{ + r: reconcile.Result{RequeueAfter: defaultPollInterval}, + }, + }, } for name, tc := range cases { t.Run(name, func(t *testing.T) { - r := NewReconciler(tc.args.mgr, tc.args.of, tc.args.opts...) + r := NewReconciler(tc.args.client, tc.args.of, tc.args.opts...) 
got, err := r.Reconcile(context.Background(), reconcile.Request{}) if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { @@ -734,6 +1317,12 @@ func TestReconcile(t *testing.T) { if diff := cmp.Diff(tc.want.r, got, test.EquateErrors()); diff != "" { t.Errorf("\n%s\nr.Reconcile(...): -want, +got:\n%s", tc.reason, diff) } + + if tr, ok := r.record.(*testRecorder); ok { + if diff := cmp.Diff(tr.Want, tr.Got, test.EquateErrors()); diff != "" { + t.Errorf("\n%s\nr.Reconcile(...): -want events, +got events:\n%s", tc.reason, diff) + } + } }) } } @@ -759,8 +1348,10 @@ func WithComposite(_ *testing.T, cr *composite.Unstructured) func(_ context.Cont } // A status update function that ensures the supplied object is the XR we want. -func WantComposite(t *testing.T, want resource.Composite) func(ctx context.Context, obj client.Object, _ ...client.SubResourceUpdateOption) error { - return func(ctx context.Context, got client.Object, _ ...client.SubResourceUpdateOption) error { +func WantComposite(t *testing.T, want resource.Composite) func(_ context.Context, obj client.Object, _ ...client.SubResourceUpdateOption) error { + t.Helper() + return func(_ context.Context, got client.Object, _ ...client.SubResourceUpdateOption) error { + t.Helper() // Normally we use a custom Equal method on conditions to ignore the // lastTransitionTime, but we may be using unstructured types here where // the conditions are just a map[string]any. 
@@ -836,203 +1427,40 @@ func TestFilterToXRPatches(t *testing.T) { } } -func TestEnqueueForCompositionRevisionFunc(t *testing.T) { - type args struct { - of schema.GroupVersionKind - list func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error - event runtimeevent.CreateEvent - } - type want struct { - added []interface{} - } - - dog := schema.GroupVersionKind{Group: "example.com", Version: "v1", Kind: "Dog"} - dogList := dog.GroupVersion().WithKind("DogList") - - tests := []struct { - name string - args args - want want - }{ - { - name: "empty", - args: args{ - of: dog, - list: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { - // test parameters only here, not in the later tests for brevity. - u, ok := list.(*kunstructured.UnstructuredList) - if !ok { - t.Errorf("list was not an UnstructuredList") - } else if got := u.GroupVersionKind(); got != dogList { - t.Errorf("list was not a DogList, got: %s", got) - } - if len(opts) != 0 { - t.Errorf("unexpected list options: %#v", opts) - } - return nil - }, - }, - }, - { - name: "automatic management policy", - args: args{ - of: dog, - list: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { - var obj1 composite.Unstructured - obj1.SetNamespace("ns") - obj1.SetName("obj1") - policy := xpv1.UpdateAutomatic - obj1.SetCompositionUpdatePolicy(&policy) - obj1.SetCompositionReference(&corev1.ObjectReference{Name: "dachshund"}) - - list.(*kunstructured.UnstructuredList).Items = []kunstructured.Unstructured{obj1.Unstructured} - - return nil - }, - event: runtimeevent.CreateEvent{ - Object: &v1.CompositionRevision{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dachshund-sadfa8", - Labels: map[string]string{ - v1.LabelCompositionName: "dachshund", - }, - }, - }, - }, - }, - want: want{ - added: []interface{}{reconcile.Request{NamespacedName: types.NamespacedName{ - Namespace: "ns", - Name: "obj1", - }}}, - }, - }, - { - name: "manual 
management policy", - args: args{ - of: dog, - list: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { - var obj1 composite.Unstructured - obj1.SetNamespace("ns") - obj1.SetName("obj1") - policy := xpv1.UpdateManual - obj1.SetCompositionUpdatePolicy(&policy) - obj1.SetCompositionReference(&corev1.ObjectReference{Name: "dachshund"}) - - list.(*kunstructured.UnstructuredList).Items = []kunstructured.Unstructured{obj1.Unstructured} - - return nil - }, - event: runtimeevent.CreateEvent{ - Object: &v1.CompositionRevision{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dachshund-sadfa8", - Labels: map[string]string{ - v1.LabelCompositionName: "dachshund", - }, - }, - }, - }, - }, - want: want{}, - }, - { - name: "other composition", - args: args{ - of: dog, - list: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { - var obj1 composite.Unstructured - obj1.SetNamespace("ns") - obj1.SetName("obj1") - policy := xpv1.UpdateAutomatic - obj1.SetCompositionUpdatePolicy(&policy) - obj1.SetCompositionReference(&corev1.ObjectReference{Name: "bernese"}) - - list.(*kunstructured.UnstructuredList).Items = []kunstructured.Unstructured{obj1.Unstructured} - - return nil - }, - event: runtimeevent.CreateEvent{ - Object: &v1.CompositionRevision{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dachshund-sadfa8", - Labels: map[string]string{ - v1.LabelCompositionName: "dachshund", - }, - }, - }, - }, - }, - want: want{}, - }, - { - name: "multiple", - args: args{ - of: dog, - list: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { - var obj1 composite.Unstructured - obj1.SetNamespace("ns") - obj1.SetName("obj1") - automatic := xpv1.UpdateAutomatic - obj1.SetCompositionUpdatePolicy(&automatic) - obj1.SetCompositionReference(&corev1.ObjectReference{Name: "dachshund"}) - - obj2 := obj1.DeepCopy() - obj2.SetName("obj2") - - obj3 := obj1.DeepCopy() - obj3.SetName("obj3") - 
obj3.SetCompositionReference(&corev1.ObjectReference{Name: "bernese"}) +// Test types. +const ( + compositeKind = "Composite" + claimKind = "Claim" +) - obj4 := obj1.DeepCopy() - obj4.SetName("obj4") - manual := xpv1.UpdateManual - obj4.SetCompositionUpdatePolicy(&manual) +// testRecorder allows asserting event creation. +type testRecorder struct { + Want []eventArgs + Got []eventArgs +} - list.(*kunstructured.UnstructuredList).Items = []kunstructured.Unstructured{ - obj1.Unstructured, - obj2.Unstructured, - obj3.Unstructured, - } +type eventArgs struct { + Kind string + Event event.Event +} - return nil - }, - event: runtimeevent.CreateEvent{ - Object: &v1.CompositionRevision{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dachshund-sadfa8", - Labels: map[string]string{ - v1.LabelCompositionName: "dachshund", - }, - }, - }, - }, - }, - want: want{ - added: []interface{}{ - reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "ns", Name: "obj1"}}, - reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "ns", Name: "obj2"}}, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - fn := EnqueueForCompositionRevisionFunc(resource.CompositeKind(tt.args.of), tt.args.list, logging.NewNopLogger()) - q := rateLimitingQueueMock{} - fn(context.TODO(), tt.args.event, &q) - if got := q.added; !reflect.DeepEqual(got, tt.want.added) { - t.Errorf("EnqueueForCompositionRevisionFunc(...)(ctx, event, queue) = %v, want %v", got, tt.want) - } - }) +func (r *testRecorder) Event(obj runtime.Object, e event.Event) { + var kind string + switch obj.(type) { + case *composite.Unstructured: + kind = compositeKind + case *claim.Unstructured: + kind = claimKind } + r.Got = append(r.Got, eventArgs{Kind: kind, Event: e}) } -type rateLimitingQueueMock struct { - workqueue.RateLimitingInterface - added []interface{} +func (r *testRecorder) WithAnnotations(_ ...string) event.Recorder { + return r } -func (f *rateLimitingQueueMock) Add(item 
interface{}) { - f.added = append(f.added, item) +func newTestRecorder(expected ...eventArgs) *testRecorder { + return &testRecorder{ + Want: expected, + } } diff --git a/internal/controller/apiextensions/composite/watch/watch.go b/internal/controller/apiextensions/composite/watch/watch.go new file mode 100644 index 000000000..262e120b0 --- /dev/null +++ b/internal/controller/apiextensions/composite/watch/watch.go @@ -0,0 +1,153 @@ +/* +Copyright 2024 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package watch implements a garbage collector for composed resource watches. +package watch + +import ( + "context" + "time" + + kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crossplane/crossplane-runtime/pkg/errors" + "github.com/crossplane/crossplane-runtime/pkg/logging" + "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" + + "github.com/crossplane/crossplane/internal/engine" +) + +// A ControllerEngine can get and stop watches for a controller. +type ControllerEngine interface { + GetWatches(name string) ([]engine.WatchID, error) + StopWatches(ctx context.Context, name string, ws ...engine.WatchID) (int, error) + GetClient() client.Client +} + +// A GarbageCollector garbage collects watches for a single composite resource +// (XR) controller. 
A watch is eligible for garbage collection when none of the +// XRs the controller owns resource references its GVK. The garbage collector +// periodically lists all of the controller's XRs to determine what GVKs they +// still reference. +type GarbageCollector struct { + controllerName string + xrGVK schema.GroupVersionKind + + engine ControllerEngine + + log logging.Logger +} + +// A GarbageCollectorOption configures a GarbageCollector. +type GarbageCollectorOption func(gc *GarbageCollector) + +// WithLogger configures how a GarbageCollector should log. +func WithLogger(l logging.Logger) GarbageCollectorOption { + return func(gc *GarbageCollector) { + gc.log = l + } +} + +// NewGarbageCollector creates a new watch garbage collector for a controller. +func NewGarbageCollector(name string, of resource.CompositeKind, ce ControllerEngine, o ...GarbageCollectorOption) *GarbageCollector { + gc := &GarbageCollector{ + controllerName: name, + xrGVK: schema.GroupVersionKind(of), + engine: ce, + log: logging.NewNopLogger(), + } + for _, fn := range o { + fn(gc) + } + return gc +} + +// GarbageCollectWatches runs garbage collection at the specified interval, +// until the supplied context is cancelled. It stops any watches for resource +// types that are no longer composed by any composite resource (XR). +func (gc *GarbageCollector) GarbageCollectWatches(ctx context.Context, interval time.Duration) { + t := time.NewTicker(interval) + defer t.Stop() + + for { + select { + case <-ctx.Done(): + gc.log.Debug("Stopping composite controller watch garbage collector", "error", ctx.Err()) + return + case <-t.C: + if err := gc.GarbageCollectWatchesNow(ctx); err != nil { + gc.log.Info("Cannot garbage collect composite controller watches", "error", err) + } + } + } +} + +// GarbageCollectWatchesNow stops any watches for resource types that are no +// longer composed by any composite resource (XR). It's safe to call from +// multiple goroutines. 
+func (gc *GarbageCollector) GarbageCollectWatchesNow(ctx context.Context) error { + // List all XRs of the type we're interested in. + l := &kunstructured.UnstructuredList{} + l.SetAPIVersion(gc.xrGVK.GroupVersion().String()) + l.SetKind(gc.xrGVK.Kind + "List") + if err := gc.engine.GetClient().List(ctx, l); err != nil { + return errors.Wrap(err, "cannot list composite resources") + } + + // Build the set of GVKs they still reference. + used := make(map[engine.WatchID]bool) + for _, u := range l.Items { + xr := &composite.Unstructured{Unstructured: u} + for _, ref := range xr.GetResourceReferences() { + used[engine.WatchID{Type: engine.WatchTypeComposedResource, GVK: schema.FromAPIVersionAndKind(ref.APIVersion, ref.Kind)}] = true + } + } + + // Get the set of running watches. + running, err := gc.engine.GetWatches(gc.controllerName) + if err != nil { + return errors.Wrap(err, "cannot get running watches") + } + + stop := make([]engine.WatchID, 0) + for _, wid := range running { + if !used[wid] { + stop = append(stop, wid) + } + } + + // No need to call StopWatches if there's nothing to stop. + if len(stop) == 0 { + return nil + } + + // Stop any watches that are running, but not used. + // + // It's possible watches were started or stopped since we called GetWatches. + // That's fine. Stopping a watch that doesn't exist is a no-op, and if a + // watch was started that needs garbage collecting we'll get it eventually + // when GC runs again. + // + // It's also possible we'll stop a watch that's actually in use, if an XR + // started composing its GVK between when we built the map of used watches + // and here where we call StopWatches. It'll restart next time the XR + // controller calls StartWatches. + _, err = gc.engine.StopWatches(ctx, gc.controllerName, stop...) 
+ return errors.Wrap(err, "cannot stop watches for composed resource types that are no longer referenced by any composite resource") +} diff --git a/internal/controller/apiextensions/composite/watch/watch_test.go b/internal/controller/apiextensions/composite/watch/watch_test.go new file mode 100644 index 000000000..af72b5129 --- /dev/null +++ b/internal/controller/apiextensions/composite/watch/watch_test.go @@ -0,0 +1,244 @@ +/* +Copyright 2024 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package watch + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crossplane/crossplane-runtime/pkg/errors" + "github.com/crossplane/crossplane-runtime/pkg/logging" + "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" + "github.com/crossplane/crossplane-runtime/pkg/test" + + "github.com/crossplane/crossplane/internal/engine" +) + +var _ ControllerEngine = &MockEngine{} + +type MockEngine struct { + MockGetWatches func(name string) ([]engine.WatchID, error) + MockStopWatches func(ctx context.Context, name string, ws ...engine.WatchID) (int, error) + MockGetClient func() client.Client +} + +func (m *MockEngine) GetWatches(name string) ([]engine.WatchID, error) { + return m.MockGetWatches(name) +} + +func (m *MockEngine) StopWatches(ctx context.Context, name string, ws ...engine.WatchID) (int, error) { + return m.MockStopWatches(ctx, name, ws...) 
+} + +func (m *MockEngine) GetClient() client.Client { + return m.MockGetClient() +} + +func TestGarbageCollectWatchesNow(t *testing.T) { + errBoom := errors.New("boom") + + type params struct { + name string + of resource.CompositeKind + ce ControllerEngine + o []GarbageCollectorOption + } + type args struct { + ctx context.Context + } + type want struct { + err error + } + + cases := map[string]struct { + reason string + params params + args args + want want + }{ + "ListXRsError": { + reason: "The method should return an error if it can't list XRs.", + params: params{ + ce: &MockEngine{ + MockGetClient: func() client.Client { + return &test.MockClient{ + MockList: test.NewMockListFn(errBoom), + } + }, + }, + }, + want: want{ + err: cmpopts.AnyError, + }, + }, + "GetWatchesError": { + reason: "The method should return an error if it can't get watches.", + params: params{ + ce: &MockEngine{ + MockGetClient: func() client.Client { + return &test.MockClient{ + MockList: test.NewMockListFn(nil), + } + }, + MockGetWatches: func(_ string) ([]engine.WatchID, error) { + return nil, errBoom + }, + }, + }, + want: want{ + err: cmpopts.AnyError, + }, + }, + "StopWatchesError": { + reason: "The method should return an error if it can't stop watches.", + params: params{ + ce: &MockEngine{ + MockGetClient: func() client.Client { + return &test.MockClient{ + MockList: test.NewMockListFn(nil), + } + }, + MockGetWatches: func(_ string) ([]engine.WatchID, error) { + w := []engine.WatchID{ + { + Type: engine.WatchTypeComposedResource, + GVK: schema.GroupVersionKind{}, + }, + } + return w, nil + }, + MockStopWatches: func(_ context.Context, _ string, _ ...engine.WatchID) (int, error) { + return 0, errBoom + }, + }, + }, + want: want{ + err: cmpopts.AnyError, + }, + }, + "NothingToStop": { + reason: "StopWatches shouldn't be called if there's no watches to stop.", + params: params{ + ce: &MockEngine{ + MockGetClient: func() client.Client { + return &test.MockClient{ + MockList: 
test.NewMockListFn(nil), + } + }, + MockGetWatches: func(_ string) ([]engine.WatchID, error) { + return nil, nil + }, + // StopWatches would panic if called, since it's not mocked. + }, + }, + want: want{ + err: nil, + }, + }, + "UneededWatchesStopped": { + reason: "StopWatches shouldn't be called if there's no watches to stop.", + params: params{ + ce: &MockEngine{ + MockGetClient: func() client.Client { + return &test.MockClient{ + MockList: test.NewMockListFn(nil, func(obj client.ObjectList) error { + xr := composite.New() + xr.SetResourceReferences([]corev1.ObjectReference{ + { + APIVersion: "example.org/v1", + Kind: "StillComposed", + // Name doesn't matter. + }, + }) + + obj.(*unstructured.UnstructuredList).Items = []unstructured.Unstructured{xr.Unstructured} + + return nil + }), + } + }, + MockGetWatches: func(_ string) ([]engine.WatchID, error) { + w := []engine.WatchID{ + // We want to keep this one. + { + Type: engine.WatchTypeComposedResource, + GVK: schema.GroupVersionKind{ + Group: "example.org", + Version: "v1", + Kind: "StillComposed", + }, + }, + // We want to GC this one. + { + Type: engine.WatchTypeComposedResource, + GVK: schema.GroupVersionKind{ + Group: "example.org", + Version: "v1", + Kind: "GarbageCollectMe", + }, + }, + } + return w, nil + }, + MockStopWatches: func(_ context.Context, _ string, ws ...engine.WatchID) (int, error) { + want := []engine.WatchID{ + { + Type: engine.WatchTypeComposedResource, + GVK: schema.GroupVersionKind{ + Group: "example.org", + Version: "v1", + Kind: "GarbageCollectMe", + }, + }, + } + + if diff := cmp.Diff(want, ws); diff != "" { + t.Errorf("\nMockStopWatches(...) -want, +got:\n%s", diff) + } + + return 0, nil + }, + }, + o: []GarbageCollectorOption{ + WithLogger(logging.NewNopLogger()), + }, + }, + want: want{ + err: nil, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + gc := NewGarbageCollector(tc.params.name, tc.params.of, tc.params.ce, tc.params.o...) 
+ err := gc.GarbageCollectWatchesNow(tc.args.ctx) + + if diff := cmp.Diff(tc.want.err, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ngc.GarbageCollectWatchesNow(...): -want error, +got error:\n%s", tc.reason, diff) + } + }) + } +} diff --git a/internal/controller/apiextensions/composition/fuzz_test.go b/internal/controller/apiextensions/composition/fuzz_test.go index 99bd21ae4..32e50ca44 100644 --- a/internal/controller/apiextensions/composition/fuzz_test.go +++ b/internal/controller/apiextensions/composition/fuzz_test.go @@ -25,7 +25,7 @@ import ( ) func FuzzNewCompositionRevision(f *testing.F) { - f.Fuzz(func(t *testing.T, data []byte) { + f.Fuzz(func(_ *testing.T, data []byte) { f := fuzz.NewConsumer(data) c := &v1.Composition{} f.GenerateStruct(c) diff --git a/internal/controller/apiextensions/composition/reconciler.go b/internal/controller/apiextensions/composition/reconciler.go index 6adef545b..92d771421 100644 --- a/internal/controller/apiextensions/composition/reconciler.go +++ b/internal/controller/apiextensions/composition/reconciler.go @@ -36,7 +36,6 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/meta" "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" "github.com/crossplane/crossplane-runtime/pkg/resource" - "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured" v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" "github.com/crossplane/crossplane/internal/controller/apiextensions/controller" @@ -46,7 +45,7 @@ const ( timeout = 2 * time.Minute ) -// Error strings +// Error strings. const ( errGet = "cannot get Composition" errListRevs = "cannot list CompositionRevisions" @@ -98,10 +97,8 @@ func WithRecorder(er event.Recorder) ReconcilerOption { // NewReconciler returns a Reconciler of Compositions. 
func NewReconciler(mgr manager.Manager, opts ...ReconcilerOption) *Reconciler { - kube := unstructured.NewClient(mgr.GetClient()) - r := &Reconciler{ - client: kube, + client: mgr.GetClient(), log: logging.NewNopLogger(), record: event.NewNopRecorder(), } @@ -122,7 +119,7 @@ type Reconciler struct { } // Reconcile a Composition. -func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocyclo // Only slightly over (12). +func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { log := r.log.WithValues("request", req) log.Debug("Reconciling") diff --git a/internal/controller/apiextensions/composition/reconciler_test.go b/internal/controller/apiextensions/composition/reconciler_test.go index 96bc63627..7529668fe 100644 --- a/internal/controller/apiextensions/composition/reconciler_test.go +++ b/internal/controller/apiextensions/composition/reconciler_test.go @@ -249,7 +249,7 @@ func TestReconcile(t *testing.T) { *obj.(*v1.Composition) = *compDev return nil }), - MockList: func(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error { + MockList: func(_ context.Context, obj client.ObjectList, opts ...client.ListOption) error { if len(opts) < 1 || opts[0].(client.MatchingLabels)[v1.LabelCompositionName] != compDev.Name { t.Errorf("unexpected list options: %v", opts) } @@ -287,7 +287,7 @@ func TestReconcile(t *testing.T) { *obj.(*v1.Composition) = *compDev return nil }), - MockList: func(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error { + MockList: func(_ context.Context, obj client.ObjectList, opts ...client.ListOption) error { if len(opts) < 1 || opts[0].(client.MatchingLabels)[v1.LabelCompositionName] != compDev.Name { t.Errorf("unexpected list options: %v", opts) } diff --git a/internal/controller/apiextensions/controller/options.go b/internal/controller/apiextensions/controller/options.go index 57c42c723..f0fe3799b 
100644 --- a/internal/controller/apiextensions/controller/options.go +++ b/internal/controller/apiextensions/controller/options.go @@ -20,6 +20,7 @@ package controller import ( "github.com/crossplane/crossplane-runtime/pkg/controller" + "github.com/crossplane/crossplane/internal/engine" "github.com/crossplane/crossplane/internal/xfn" ) @@ -27,6 +28,9 @@ import ( type Options struct { controller.Options + // ControllerEngine used to dynamically start and stop controllers. + ControllerEngine *engine.ControllerEngine + // FunctionRunner used to run Composition Functions. FunctionRunner *xfn.PackagedFunctionRunner } diff --git a/internal/controller/apiextensions/definition/composed.go b/internal/controller/apiextensions/definition/composed.go deleted file mode 100644 index 1fb643eee..000000000 --- a/internal/controller/apiextensions/definition/composed.go +++ /dev/null @@ -1,283 +0,0 @@ -/* -Copyright 2023 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package definition - -import ( - "context" - "strings" - "sync" - - "github.com/google/uuid" - extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - kcache "k8s.io/client-go/tools/cache" - "k8s.io/client-go/util/workqueue" - cache "sigs.k8s.io/controller-runtime/pkg/cache" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/cluster" - runtimeevent "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/source" - - "github.com/crossplane/crossplane-runtime/pkg/controller" - "github.com/crossplane/crossplane-runtime/pkg/logging" - - "github.com/crossplane/crossplane/internal/xcrd" -) - -// composedResourceInformers manages composed resource informers referenced by -// composite resources. It serves as an event source for realtime notifications -// of changed composed resources, with the composite reconcilers as sinks. -// It keeps composed resource informers alive as long as there are composites -// referencing them. In parallel, the composite reconcilers keep track of -// references to composed resources, and inform composedResourceInformers about -// them via the WatchComposedResources method. -type composedResourceInformers struct { - log logging.Logger - cluster cluster.Cluster - - gvkRoutedCache *controller.GVKRoutedCache - - lock sync.RWMutex // everything below is protected by this lock - - // cdCaches holds the composed resource informers. These are dynamically - // started and stopped based on the composites that reference them. - cdCaches map[schema.GroupVersionKind]cdCache - // xrCaches holds the composite resource informers. We use them to find - // composites referencing a certain composed resource GVK. 
If no composite - // is left doing so, a composed resource informer is stopped. - xrCaches map[schema.GroupVersionKind]cache.Cache - sinks map[string]func(ev runtimeevent.UpdateEvent) // by some uid -} - -type cdCache struct { - cache cache.Cache - cancelFn context.CancelFunc -} - -var _ source.Source = &composedResourceInformers{} - -// Start implements source.Source, i.e. starting composedResourceInformers as -// source with h as the sink of update events. It keeps sending events until -// ctx is done. -// Note that Start can be called multiple times to deliver events to multiple -// (composite resource) controllers. -func (i *composedResourceInformers) Start(ctx context.Context, h handler.EventHandler, q workqueue.RateLimitingInterface, ps ...predicate.Predicate) error { - id := uuid.New().String() - - i.lock.Lock() - defer i.lock.Unlock() - i.sinks[id] = func(ev runtimeevent.UpdateEvent) { - for _, p := range ps { - if !p.Update(ev) { - return - } - } - h.Update(ctx, ev, q) - } - - go func() { - <-ctx.Done() - - i.lock.Lock() - defer i.lock.Unlock() - delete(i.sinks, id) - }() - - return nil -} - -// RegisterComposite registers a composite resource cache with its GVK. -// Instances of this GVK will be considered to keep composed resource informers -// alive. -func (i *composedResourceInformers) RegisterComposite(gvk schema.GroupVersionKind, ca cache.Cache) { - i.lock.Lock() - defer i.lock.Unlock() - - if i.xrCaches == nil { - i.xrCaches = make(map[schema.GroupVersionKind]cache.Cache) - } - i.xrCaches[gvk] = ca -} - -// UnregisterComposite removes a composite resource cache from being considered -// to keep composed resource informers alive. -func (i *composedResourceInformers) UnregisterComposite(gvk schema.GroupVersionKind) { - i.lock.Lock() - defer i.lock.Unlock() - delete(i.xrCaches, gvk) -} - -// WatchComposedResources starts informers for the given composed resource GVKs. 
-// The is wired into the composite reconciler, which will call this method on -// every reconcile to make composedResourceInformers aware of the composed -// resources the given composite resource references. -// -// Note that this complements cleanupComposedResourceInformers which regularly -// garbage collects composed resource informers that are no longer referenced by -// any composite. -func (i *composedResourceInformers) WatchComposedResources(gvks ...schema.GroupVersionKind) { - i.lock.RLock() - defer i.lock.RUnlock() - - // start new informers - for _, gvk := range gvks { - if _, found := i.cdCaches[gvk]; found { - continue - } - - log := i.log.WithValues("gvk", gvk.String()) - - ca, err := cache.New(i.cluster.GetConfig(), cache.Options{}) - if err != nil { - log.Debug("failed creating a cache", "error", err) - continue - } - - // don't forget to call cancelFn in error cases to avoid leaks. In the - // happy case it's called from the go routine starting the cache below. - ctx, cancelFn := context.WithCancel(context.Background()) - - u := kunstructured.Unstructured{} - u.SetGroupVersionKind(gvk) - inf, err := ca.GetInformer(ctx, &u, cache.BlockUntilSynced(false)) // don't block. We wait in the go routine below. 
- if err != nil { - cancelFn() - log.Debug("failed getting informer", "error", err) - continue - } - - if _, err := inf.AddEventHandler(kcache.ResourceEventHandlerFuncs{ - UpdateFunc: func(oldObj, newObj interface{}) { - old := oldObj.(client.Object) - obj := newObj.(client.Object) - if old.GetResourceVersion() == obj.GetResourceVersion() { - return - } - - i.lock.RLock() - defer i.lock.RUnlock() - - ev := runtimeevent.UpdateEvent{ - ObjectOld: old, - ObjectNew: obj, - } - for _, handleFn := range i.sinks { - handleFn(ev) - } - }, - }); err != nil { - cancelFn() - log.Debug("failed adding event handler", "error", err) - continue - } - - go func() { - defer cancelFn() - - log.Info("Starting composed resource watch") - _ = ca.Start(ctx) - }() - - i.cdCaches[gvk] = cdCache{ - cache: ca, - cancelFn: cancelFn, - } - - // wait for in the background, and only when synced add to the routed cache - go func(gvk schema.GroupVersionKind) { - if synced := ca.WaitForCacheSync(ctx); synced { - log.Debug("Composed resource cache synced") - i.gvkRoutedCache.AddDelegate(gvk, ca) - } - }(gvk) - } -} - -// cleanupComposedResourceInformers garbage collects composed resource informers -// that are no longer referenced by any composite resource. -// -// Note that this complements WatchComposedResources which starts informers for -// the composed resources referenced by a composite resource. -func (i *composedResourceInformers) cleanupComposedResourceInformers(ctx context.Context) { //nolint:gocyclo // splitting it doesn't make it easier to read. 
- crds := extv1.CustomResourceDefinitionList{} - if err := i.cluster.GetClient().List(ctx, &crds); err != nil { - i.log.Debug(errListCRDs, "error", err) - return - } - - // copy map to avoid locking it for the entire duration of the loop - xrCaches := make(map[schema.GroupVersionKind]cache.Cache, len(i.xrCaches)) - i.lock.RLock() - for gvk, ca := range i.xrCaches { - xrCaches[gvk] = ca - } - i.lock.RUnlock() - - // find all CRDs that some XR is referencing. This is O(CRDs * XRDs * versions). - // In practice, CRDs are 1000ish max, and XRDs are 10ish. So this is - // fast enough for now. It's all in-memory. - referenced := make(map[schema.GroupVersionKind]bool) - for _, crd := range crds.Items { - crd := crd - - if !xcrd.IsEstablished(crd.Status) { - continue - } - - for _, v := range crd.Spec.Versions { - cdGVK := schema.GroupVersionKind{Group: crd.Spec.Group, Version: v.Name, Kind: crd.Spec.Names.Kind} - for xrGVK, xrCache := range xrCaches { - // list composites that reference this composed GVK - list := kunstructured.UnstructuredList{} - list.SetGroupVersionKind(xrGVK.GroupVersion().WithKind(xrGVK.Kind + "List")) - if err := xrCache.List(ctx, &list, client.MatchingFields{compositeResourceRefGVKsIndex: cdGVK.String()}); err != nil { - i.log.Debug("cannot list composite resources referencing a certain composed resource GVK", "error", err, "gvk", xrGVK.String(), "fieldSelector", compositeResourceRefGVKsIndex+"="+cdGVK.String()) - continue - } - if len(list.Items) > 0 { - referenced[cdGVK] = true - } - } - } - } - - // stop old informers - for gvk, inf := range i.cdCaches { - if referenced[gvk] { - continue - } - inf.cancelFn() - i.gvkRoutedCache.RemoveDelegate(gvk) - i.log.Info("Stopped composed resource watch", "gvk", gvk.String()) - delete(i.cdCaches, gvk) - } -} - -func parseAPIVersion(v string) (string, string) { - parts := strings.SplitN(v, "/", 2) - switch len(parts) { - case 1: - return "", parts[0] - case 2: - return parts[0], parts[1] - default: - return 
"", "" - } -} diff --git a/internal/controller/apiextensions/definition/handlers.go b/internal/controller/apiextensions/definition/handlers.go new file mode 100644 index 000000000..d6f3fb1de --- /dev/null +++ b/internal/controller/apiextensions/definition/handlers.go @@ -0,0 +1,115 @@ +/* +Copyright 2024 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package definition + +import ( + "context" + + kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" + kevent "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + "github.com/crossplane/crossplane-runtime/pkg/logging" + "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" + + v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" +) + +// EnqueueForCompositionRevision enqueues reconciles for all XRs that will use a +// newly created CompositionRevision. 
+func EnqueueForCompositionRevision(of resource.CompositeKind, c client.Reader, log logging.Logger) handler.Funcs { + return handler.Funcs{ + CreateFunc: func(ctx context.Context, e kevent.CreateEvent, q workqueue.RateLimitingInterface) { + rev, ok := e.Object.(*v1.CompositionRevision) + if !ok { + // should not happen + return + } + + // TODO(negz): Check whether the revision's compositeTypeRef matches + // the supplied CompositeKind. If it doesn't, we can return early. + + // get all XRs + xrs := kunstructured.UnstructuredList{} + xrs.SetGroupVersionKind(schema.GroupVersionKind(of)) + xrs.SetKind(schema.GroupVersionKind(of).Kind + "List") + // TODO(negz): Index XRs by composition revision name? + if err := c.List(ctx, &xrs); err != nil { + // logging is most we can do here. This is a programming error if it happens. + log.Info("cannot list in CompositionRevision handler", "type", schema.GroupVersionKind(of).String(), "error", err) + return + } + + // enqueue all those that reference the Composition of this revision + compName := rev.Labels[v1.LabelCompositionName] + // TODO(negz): Check this before we get all XRs. + if compName == "" { + return + } + for _, u := range xrs.Items { + xr := composite.Unstructured{Unstructured: u} + + // only automatic + if pol := xr.GetCompositionUpdatePolicy(); pol != nil && *pol == xpv1.UpdateManual { + continue + } + + // only those that reference the right Composition + if ref := xr.GetCompositionReference(); ref == nil || ref.Name != compName { + continue + } + + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Name: xr.GetName(), + Namespace: xr.GetNamespace(), + }}) + } + }, + } +} + +// EnqueueCompositeResources enqueues reconciles for all XRs that reference an +// updated composed resource. 
+func EnqueueCompositeResources(of resource.CompositeKind, c client.Reader, log logging.Logger) handler.Funcs { + return handler.Funcs{ + UpdateFunc: func(ctx context.Context, ev kevent.UpdateEvent, q workqueue.RateLimitingInterface) { + xrGVK := schema.GroupVersionKind(of) + cdGVK := ev.ObjectNew.GetObjectKind().GroupVersionKind() + key := refKey(ev.ObjectNew.GetNamespace(), ev.ObjectNew.GetName(), cdGVK.Kind, cdGVK.GroupVersion().String()) + + composites := kunstructured.UnstructuredList{} + composites.SetGroupVersionKind(xrGVK.GroupVersion().WithKind(xrGVK.Kind + "List")) + if err := c.List(ctx, &composites, client.MatchingFields{compositeResourcesRefsIndex: key}); err != nil { + log.Debug("cannot list composite resources related to a composed resource change", "error", err, "gvk", xrGVK.String(), "fieldSelector", compositeResourcesRefsIndex+"="+key) + return + } + + // queue those composites for reconciliation + for _, xr := range composites.Items { + log.Debug("Enqueueing composite resource because composed resource changed", "name", xr.GetName(), "cdGVK", cdGVK.String(), "cdName", ev.ObjectNew.GetName()) + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: xr.GetName()}}) + } + }, + } +} diff --git a/internal/controller/apiextensions/definition/handlers_test.go b/internal/controller/apiextensions/definition/handlers_test.go new file mode 100644 index 000000000..9992c0931 --- /dev/null +++ b/internal/controller/apiextensions/definition/handlers_test.go @@ -0,0 +1,237 @@ +package definition + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" + kevent "sigs.k8s.io/controller-runtime/pkg/event" + 
"sigs.k8s.io/controller-runtime/pkg/reconcile" + + xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + "github.com/crossplane/crossplane-runtime/pkg/logging" + "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" + "github.com/crossplane/crossplane-runtime/pkg/test" + + v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" +) + +func TestEnqueueForCompositionRevisionFunc(t *testing.T) { + type args struct { + of schema.GroupVersionKind + reader client.Reader + event kevent.CreateEvent + } + type want struct { + added []interface{} + } + + dog := schema.GroupVersionKind{Group: "example.com", Version: "v1", Kind: "Dog"} + dogList := dog.GroupVersion().WithKind("DogList") + + tests := map[string]struct { + reason string + args args + want want + }{ + "NoXRs": { + reason: "If there are no XRs of the specified type, no reconciles should be enqueued.", + args: args{ + of: dog, + reader: &test.MockClient{ + MockList: func(_ context.Context, list client.ObjectList, opts ...client.ListOption) error { + // test parameters only here, not in the later tests for brevity. 
+ u, ok := list.(*kunstructured.UnstructuredList) + if !ok { + t.Errorf("list was not an UnstructuredList") + } else if got := u.GroupVersionKind(); got != dogList { + t.Errorf("list was not a DogList, got: %s", got) + } + if len(opts) != 0 { + t.Errorf("unexpected list options: %#v", opts) + } + return nil + }, + }, + }, + }, + "AutomaticManagementPolicy": { + reason: "A reconcile should be enqueued for XRs with an automatic revision update policy.", + args: args{ + of: dog, + reader: &test.MockClient{ + MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { + var obj1 composite.Unstructured + obj1.SetNamespace("ns") + obj1.SetName("obj1") + policy := xpv1.UpdateAutomatic + obj1.SetCompositionUpdatePolicy(&policy) + obj1.SetCompositionReference(&corev1.ObjectReference{Name: "dachshund"}) + + list.(*kunstructured.UnstructuredList).Items = []kunstructured.Unstructured{obj1.Unstructured} + + return nil + }, + }, + event: kevent.CreateEvent{ + Object: &v1.CompositionRevision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dachshund-sadfa8", + Labels: map[string]string{ + v1.LabelCompositionName: "dachshund", + }, + }, + }, + }, + }, + want: want{ + added: []interface{}{reconcile.Request{NamespacedName: types.NamespacedName{ + Namespace: "ns", + Name: "obj1", + }}}, + }, + }, + "ManualManagementPolicy": { + reason: "A reconcile shouldn't be enqueued for XRs with a manual revision update policy.", + args: args{ + of: dog, + reader: &test.MockClient{ + MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { + var obj1 composite.Unstructured + obj1.SetNamespace("ns") + obj1.SetName("obj1") + policy := xpv1.UpdateManual + obj1.SetCompositionUpdatePolicy(&policy) + obj1.SetCompositionReference(&corev1.ObjectReference{Name: "dachshund"}) + + list.(*kunstructured.UnstructuredList).Items = []kunstructured.Unstructured{obj1.Unstructured} + + return nil + }, + }, + event: kevent.CreateEvent{ + Object: 
&v1.CompositionRevision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dachshund-sadfa8", + Labels: map[string]string{ + v1.LabelCompositionName: "dachshund", + }, + }, + }, + }, + }, + want: want{}, + }, + "OtherComposition": { + reason: "A reconcile shouldn't be enqueued for an XR that references a different Composition", + args: args{ + of: dog, + reader: &test.MockClient{ + MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { + var obj1 composite.Unstructured + obj1.SetNamespace("ns") + obj1.SetName("obj1") + policy := xpv1.UpdateAutomatic + obj1.SetCompositionUpdatePolicy(&policy) + obj1.SetCompositionReference(&corev1.ObjectReference{Name: "bernese"}) + + list.(*kunstructured.UnstructuredList).Items = []kunstructured.Unstructured{obj1.Unstructured} + + return nil + }, + }, + event: kevent.CreateEvent{ + Object: &v1.CompositionRevision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dachshund-sadfa8", + Labels: map[string]string{ + v1.LabelCompositionName: "dachshund", + }, + }, + }, + }, + }, + want: want{}, + }, + "Multiple": { + reason: "Reconciles should be enqueued only for the XRs that reference the relevant Composition, and have an automatic composition revision update policy.", + args: args{ + of: dog, + reader: &test.MockClient{ + MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { + var obj1 composite.Unstructured + obj1.SetNamespace("ns") + obj1.SetName("obj1") + automatic := xpv1.UpdateAutomatic + obj1.SetCompositionUpdatePolicy(&automatic) + obj1.SetCompositionReference(&corev1.ObjectReference{Name: "dachshund"}) + + obj2 := obj1.DeepCopy() + obj2.SetName("obj2") + + obj3 := obj1.DeepCopy() + obj3.SetName("obj3") + obj3.SetCompositionReference(&corev1.ObjectReference{Name: "bernese"}) + + obj4 := obj1.DeepCopy() + obj4.SetName("obj4") + manual := xpv1.UpdateManual + obj4.SetCompositionUpdatePolicy(&manual) + + list.(*kunstructured.UnstructuredList).Items = 
[]kunstructured.Unstructured{ + obj1.Unstructured, + obj2.Unstructured, + obj3.Unstructured, + } + + return nil + }, + }, + event: kevent.CreateEvent{ + Object: &v1.CompositionRevision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dachshund-sadfa8", + Labels: map[string]string{ + v1.LabelCompositionName: "dachshund", + }, + }, + }, + }, + }, + want: want{ + added: []interface{}{ + reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "ns", Name: "obj1"}}, + reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "ns", Name: "obj2"}}, + }, + }, + }, + } + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + fns := EnqueueForCompositionRevision(resource.CompositeKind(tc.args.of), tc.args.reader, logging.NewNopLogger()) + q := rateLimitingQueueMock{} + fns.Create(context.TODO(), tc.args.event, &q) + + if diff := cmp.Diff(tc.want.added, q.added); diff != "" { + t.Errorf("\n%s\nfns.Create(...): -want, +got:\n%s", tc.reason, diff) + } + }) + } +} + +type rateLimitingQueueMock struct { + workqueue.RateLimitingInterface + added []interface{} +} + +func (f *rateLimitingQueueMock) Add(item interface{}) { + f.added = append(f.added, item) +} diff --git a/internal/controller/apiextensions/definition/indexes.go b/internal/controller/apiextensions/definition/indexes.go index 94074885f..bfb622e0f 100644 --- a/internal/controller/apiextensions/definition/indexes.go +++ b/internal/controller/apiextensions/definition/indexes.go @@ -17,54 +17,21 @@ limitations under the License. 
package definition import ( - "context" "fmt" kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/util/workqueue" - "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" - runtimeevent "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "github.com/crossplane/crossplane-runtime/pkg/logging" "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" ) const ( - // compositeResourceRefGVKsIndex is an index of all GroupKinds that - // are in use by a Composition. It indexes from spec.resourceRefs, not - // from spec.resources. Hence, it will also work with functions. - compositeResourceRefGVKsIndex = "compositeResourceRefsGVKs" // compositeResourcesRefsIndex is an index of resourceRefs that are owned // by a composite. compositeResourcesRefsIndex = "compositeResourcesRefs" ) -var ( - _ client.IndexerFunc = IndexCompositeResourceRefGVKs - _ client.IndexerFunc = IndexCompositeResourcesRefs -) - -// IndexCompositeResourceRefGVKs assumes the passed object is a composite. It -// returns gvk keys for every resource referenced in the composite. -func IndexCompositeResourceRefGVKs(o client.Object) []string { - u, ok := o.(*kunstructured.Unstructured) - if !ok { - return nil // should never happen - } - xr := composite.Unstructured{Unstructured: *u} - refs := xr.GetResourceReferences() - keys := make([]string, 0, len(refs)) - for _, ref := range refs { - group, version := parseAPIVersion(ref.APIVersion) - keys = append(keys, schema.GroupVersionKind{Group: group, Version: version, Kind: ref.Kind}.String()) - } - // unification is done by the informer. - return keys -} +var _ client.IndexerFunc = IndexCompositeResourcesRefs // IndexCompositeResourcesRefs assumes the passed object is a composite. It // returns keys for every composed resource referenced in the composite. 
@@ -85,23 +52,3 @@ func IndexCompositeResourcesRefs(o client.Object) []string { func refKey(ns, name, kind, apiVersion string) string { return fmt.Sprintf("%s.%s.%s.%s", name, ns, kind, apiVersion) } - -func enqueueXRsForMR(ca cache.Cache, xrGVK schema.GroupVersionKind, log logging.Logger) func(ctx context.Context, ev runtimeevent.UpdateEvent, q workqueue.RateLimitingInterface) { - return func(ctx context.Context, ev runtimeevent.UpdateEvent, q workqueue.RateLimitingInterface) { - mrGVK := ev.ObjectNew.GetObjectKind().GroupVersionKind() - key := refKey(ev.ObjectNew.GetNamespace(), ev.ObjectNew.GetName(), mrGVK.Kind, mrGVK.GroupVersion().String()) - - composites := kunstructured.UnstructuredList{} - composites.SetGroupVersionKind(xrGVK.GroupVersion().WithKind(xrGVK.Kind + "List")) - if err := ca.List(ctx, &composites, client.MatchingFields{compositeResourcesRefsIndex: key}); err != nil { - log.Debug("cannot list composite resources related to a MR change", "error", err, "gvk", xrGVK.String(), "fieldSelector", compositeResourcesRefsIndex+"="+key) - return - } - - // queue those composites for reconciliation - for _, xr := range composites.Items { - log.Info("Enqueueing composite resource because managed resource changed", "name", xr.GetName(), "mrGVK", mrGVK.String(), "mrName", ev.ObjectNew.GetName()) - q.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: xr.GetName()}}) - } - } -} diff --git a/internal/controller/apiextensions/definition/indexes_test.go b/internal/controller/apiextensions/definition/indexes_test.go index 3e725ddc0..40fc1315e 100644 --- a/internal/controller/apiextensions/definition/indexes_test.go +++ b/internal/controller/apiextensions/definition/indexes_test.go @@ -25,50 +25,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -func TestIndexCompositeResourceRefGVKs(t *testing.T) { - type args struct { - object client.Object - } - tests := map[string]struct { - args args - want []string - }{ - "Nil": {args: args{object: nil}, 
want: nil}, - "NotUnstructured": {args: args{object: &corev1.Pod{}}, want: nil}, - "NoRefs": {args: args{object: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "spec": map[string]interface{}{}, - }, - }}, want: []string{}}, - "References": {args: args{object: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "spec": map[string]interface{}{ - "resourceRefs": []interface{}{ - map[string]interface{}{ - "apiVersion": "nop.crossplane.io/v1alpha1", - "kind": "NopResource", - "name": "mr", - }, - map[string]interface{}{ - "apiVersion": "nop.example.org/v1alpha1", - "kind": "NopResource", - "name": "xr", - }, - }, - }, - }, - }}, want: []string{"nop.crossplane.io/v1alpha1, Kind=NopResource", "nop.example.org/v1alpha1, Kind=NopResource"}}, - } - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - got := IndexCompositeResourceRefGVKs(tc.args.object) - if diff := cmp.Diff(tc.want, got); diff != "" { - t.Errorf("\n%s\nIndexCompositeResourceRefGVKs(...): -want, +got:\n%s", name, diff) - } - }) - } -} - func TestIndexCompositeResourcesRefs(t *testing.T) { type args struct { object client.Object diff --git a/internal/controller/apiextensions/definition/reconciler.go b/internal/controller/apiextensions/definition/reconciler.go index 4ae28275b..6c5ca40bc 100644 --- a/internal/controller/apiextensions/definition/reconciler.go +++ b/internal/controller/apiextensions/definition/reconciler.go @@ -28,19 +28,13 @@ import ( kmeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/workqueue" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" - kcontroller 
"sigs.k8s.io/controller-runtime/pkg/controller" - runtimeevent "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" "github.com/crossplane/crossplane-runtime/pkg/connection" "github.com/crossplane/crossplane-runtime/pkg/controller" @@ -51,12 +45,13 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" - "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured" v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" "github.com/crossplane/crossplane/apis/secrets/v1alpha1" "github.com/crossplane/crossplane/internal/controller/apiextensions/composite" + "github.com/crossplane/crossplane/internal/controller/apiextensions/composite/watch" apiextensionscontroller "github.com/crossplane/crossplane/internal/controller/apiextensions/controller" + "github.com/crossplane/crossplane/internal/engine" "github.com/crossplane/crossplane/internal/features" "github.com/crossplane/crossplane/internal/xcrd" ) @@ -71,6 +66,8 @@ const ( errApplyCRD = "cannot apply rendered composite resource CustomResourceDefinition" errUpdateStatus = "cannot update status of CompositeResourceDefinition" errStartController = "cannot start composite resource controller" + errStopController = "cannot stop composite resource controller" + errStartWatches = "cannot start composite resource controller watches" errAddIndex = "cannot add composite GVK index" errAddFinalizer = "cannot add composite resource finalizer" errRemoveFinalizer = "cannot remove composite resource finalizer" @@ -95,12 +92,50 @@ const ( ) // A ControllerEngine can start and stop Kubernetes controllers on demand. 
+// +//nolint:interfacebloat // We use this interface to stub the engine for testing, and we need all of its functionality. type ControllerEngine interface { + Start(name string, o ...engine.ControllerOption) error + Stop(ctx context.Context, name string) error IsRunning(name string) bool - Create(name string, o kcontroller.Options, w ...controller.Watch) (controller.NamedController, error) - Start(name string, o kcontroller.Options, w ...controller.Watch) error - Stop(name string) - Err(name string) error + GetWatches(name string) ([]engine.WatchID, error) + StartWatches(name string, ws ...engine.Watch) error + StopWatches(ctx context.Context, name string, ws ...engine.WatchID) (int, error) + GetClient() client.Client + GetFieldIndexer() client.FieldIndexer +} + +// A NopEngine does nothing. +type NopEngine struct{} + +// Start does nothing. +func (e *NopEngine) Start(_ string, _ ...engine.ControllerOption) error { return nil } + +// Stop does nothing. +func (e *NopEngine) Stop(_ context.Context, _ string) error { return nil } + +// IsRunning always returns true. +func (e *NopEngine) IsRunning(_ string) bool { return true } + +// GetWatches does nothing. +func (e *NopEngine) GetWatches(_ string) ([]engine.WatchID, error) { return nil, nil } + +// StartWatches does nothing. +func (e *NopEngine) StartWatches(_ string, _ ...engine.Watch) error { return nil } + +// StopWatches does nothing. +func (e *NopEngine) StopWatches(_ context.Context, _ string, _ ...engine.WatchID) (int, error) { + return 0, nil +} + +// GetClient returns a nil client. +func (e *NopEngine) GetClient() client.Client { + return nil +} + +// GetFieldIndexer returns a nil field indexer. 
+func (e *NopEngine) GetFieldIndexer() client.FieldIndexer { + return nil } // A CRDRenderer renders a CompositeResourceDefinition's corresponding @@ -124,28 +159,16 @@ func (fn CRDRenderFn) Render(d *v1.CompositeResourceDefinition) (*extv1.CustomRe func Setup(mgr ctrl.Manager, o apiextensionscontroller.Options) error { name := "defined/" + strings.ToLower(v1.CompositeResourceDefinitionGroupKind) - r := NewReconciler(mgr, + r := NewReconciler(NewClientApplicator(mgr.GetClient()), WithLogger(o.Logger.WithValues("controller", name)), WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + WithControllerEngine(o.ControllerEngine), WithOptions(o)) - if o.Features.Enabled(features.EnableAlphaRealtimeCompositions) { - // Register a runnable regularly checking whether the watch composed - // resources are still referenced by composite resources. If not, the - // composed resource informer is stopped. - if err := mgr.Add(manager.RunnableFunc(func(ctx context.Context) error { - // Run every minute. - wait.UntilWithContext(ctx, r.xrInformers.cleanupComposedResourceInformers, time.Minute) - return nil - })); err != nil { - return errors.Wrap(err, errCannotAddInformerLoopToManager) - } - } - return ctrl.NewControllerManagedBy(mgr). Named(name). For(&v1.CompositeResourceDefinition{}). - Owns(&extv1.CustomResourceDefinition{}). + Owns(&extv1.CustomResourceDefinition{}, builder.WithPredicates(resource.NewPredicates(IsCompositeResourceCRD()))). WithOptions(o.ForControllerRuntime()). Complete(ratelimiter.NewReconciler(name, errors.WithSilentRequeueOnConflict(r), o.GlobalRateLimiter)) } @@ -157,7 +180,6 @@ type ReconcilerOption func(*Reconciler) func WithLogger(log logging.Logger) ReconcilerOption { return func(r *Reconciler) { r.log = log - r.xrInformers.log = log } } @@ -188,7 +210,7 @@ func WithFinalizer(f resource.Finalizer) ReconcilerOption { // lifecycles of composite controllers. 
func WithControllerEngine(c ControllerEngine) ReconcilerOption { return func(r *Reconciler) { - r.composite.ControllerEngine = c + r.engine = c } } @@ -200,48 +222,29 @@ func WithCRDRenderer(c CRDRenderer) ReconcilerOption { } } -// WithClientApplicator specifies how the Reconciler should interact with the -// Kubernetes API. -func WithClientApplicator(ca resource.ClientApplicator) ReconcilerOption { - return func(r *Reconciler) { - r.client = ca - } -} - type definition struct { CRDRenderer - ControllerEngine resource.Finalizer } -// NewReconciler returns a Reconciler of CompositeResourceDefinitions. -func NewReconciler(mgr manager.Manager, opts ...ReconcilerOption) *Reconciler { - kube := unstructured.NewClient(mgr.GetClient()) - - ca := controller.NewGVKRoutedCache(mgr.GetScheme(), mgr.GetCache()) +// NewClientApplicator returns a ClientApplicator suitable for use by the +// definition controller. +func NewClientApplicator(c client.Client) resource.ClientApplicator { + // TODO(negz): Use server-side apply instead of a ClientApplicator. + return resource.ClientApplicator{Client: c, Applicator: resource.NewAPIUpdatingApplicator(c)} +} +// NewReconciler returns a Reconciler of CompositeResourceDefinitions. 
+func NewReconciler(ca resource.ClientApplicator, opts ...ReconcilerOption) *Reconciler { r := &Reconciler{ - mgr: mgr, - - client: resource.ClientApplicator{ - Client: kube, - Applicator: resource.NewAPIUpdatingApplicator(kube), - }, + client: ca, composite: definition{ - CRDRenderer: CRDRenderFn(xcrd.ForCompositeResource), - ControllerEngine: controller.NewEngine(mgr), - Finalizer: resource.NewAPIFinalizer(kube, finalizer), + CRDRenderer: CRDRenderFn(xcrd.ForCompositeResource), + Finalizer: resource.NewAPIFinalizer(ca, finalizer), }, - xrInformers: composedResourceInformers{ - log: logging.NewNopLogger(), - cluster: mgr, - - gvkRoutedCache: ca, - cdCaches: make(map[schema.GroupVersionKind]cdCache), - sinks: make(map[string]func(ev runtimeevent.UpdateEvent)), - }, + engine: &NopEngine{}, log: logging.NewNopLogger(), record: event.NewNopRecorder(), @@ -255,33 +258,30 @@ func NewReconciler(mgr manager.Manager, opts ...ReconcilerOption) *Reconciler { f(r) } - if r.options.Features.Enabled(features.EnableAlphaRealtimeCompositions) { - // wrap the manager's cache to route requests to dynamically started - // informers for managed resources. - r.mgr = controller.WithGVKRoutedCache(ca, mgr) - } - return r } // A Reconciler reconciles CompositeResourceDefinitions. type Reconciler struct { + // This client should only be used by this XRD controller, not the XR + // controllers it manages. XR controllers should use the engine's client. + // This ensures XR controllers will use a client backed by the same cache + // used to power their watches. client resource.ClientApplicator - mgr manager.Manager composite definition + engine ControllerEngine + log logging.Logger record event.Recorder - xrInformers composedResourceInformers - options apiextensionscontroller.Options } // Reconcile a CompositeResourceDefinition by defining a new kind of composite // resource and starting a controller to reconcile it. 
-func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocyclo // Reconcilers are complex. Be wary of adding more. +func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocognit // Reconcilers are complex. Be wary of adding more. log := r.log.WithValues("request", req) log.Debug("Reconciling") @@ -336,11 +336,14 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco // the (presumably exceedingly rare) latter case we'll orphan // the CRD. if !meta.WasCreated(crd) || !metav1.IsControlledBy(crd, d) { - // It's likely that we've already stopped this - // controller on a previous reconcile, but we try again - // just in case. This is a no-op if the controller was - // already stopped. - r.composite.Stop(composite.ControllerName(d.GetName())) + // It's likely that we've already stopped this controller on a + // previous reconcile, but we try again just in case. This is a + // no-op if the controller was already stopped. + if err := r.engine.Stop(ctx, composite.ControllerName(d.GetName())); err != nil { + err = errors.Wrap(err, errStopController) + r.record.Event(d, event.Warning(reasonTerminateXR, err)) + return reconcile.Result{}, err + } log.Debug("Stopped composite resource controller") if err := r.composite.RemoveFinalizer(ctx, d); err != nil { @@ -390,9 +393,13 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco return reconcile.Result{Requeue: true}, nil } - // The controller should be stopped before the deletion of CRD - // so that it doesn't crash. - r.composite.Stop(composite.ControllerName(d.GetName())) + // The controller must be stopped before the deletion of the CRD so that + // it doesn't crash. 
+ if err := r.engine.Stop(ctx, composite.ControllerName(d.GetName())); err != nil { + err = errors.Wrap(err, errStopController) + r.record.Event(d, event.Warning(reasonTerminateXR, err)) + return reconcile.Result{}, err + } log.Debug("Stopped composite resource controller") if err := r.client.Delete(ctx, crd); resource.IgnoreNotFound(err) != nil { @@ -440,85 +447,80 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco return reconcile.Result{Requeue: true}, nil } - if err := r.composite.Err(composite.ControllerName(d.GetName())); err != nil { - log.Debug("Composite resource controller encountered an error", "error", err) - } - observed := d.Status.Controllers.CompositeResourceTypeRef desired := v1.TypeReferenceTo(d.GetCompositeGroupVersionKind()) if observed.APIVersion != "" && observed != desired { - r.composite.Stop(composite.ControllerName(d.GetName())) - if r.options.Features.Enabled(features.EnableAlphaRealtimeCompositions) { - r.xrInformers.UnregisterComposite(d.GetCompositeGroupVersionKind()) + if err := r.engine.Stop(ctx, composite.ControllerName(d.GetName())); err != nil { + err = errors.Wrap(err, errStopController) + r.record.Event(d, event.Warning(reasonEstablishXR, err)) + return reconcile.Result{}, err } log.Debug("Referenceable version changed; stopped composite resource controller", "observed-version", observed.APIVersion, "desired-version", desired.APIVersion) } - ro := CompositeReconcilerOptions(r.options, d, r.client, r.log, r.record) - ck := resource.CompositeKind(d.GetCompositeGroupVersionKind()) - if r.options.Features.Enabled(features.EnableAlphaRealtimeCompositions) { - ro = append(ro, composite.WithKindObserver(composite.KindObserverFunc(r.xrInformers.WatchComposedResources))) + if r.engine.IsRunning(composite.ControllerName(d.GetName())) { + log.Debug("Composite resource controller is running") + d.Status.SetConditions(v1.WatchingComposite()) + return reconcile.Result{Requeue: false}, 
errors.Wrap(r.client.Status().Update(ctx, d), errUpdateStatus) } - cr := composite.NewReconciler(r.mgr, ck, ro...) + + ro := r.CompositeReconcilerOptions(ctx, d) + ck := resource.CompositeKind(d.GetCompositeGroupVersionKind()) + + cr := composite.NewReconciler(r.engine.GetClient(), ck, ro...) ko := r.options.ForControllerRuntime() + + // Most controllers use this type of rate limiter to backoff requeues from 1 + // to 60 seconds. Despite the name, it doesn't only rate limit requeues due + // to errors. It also rate limits requeues due to a reconcile returning + // {Requeue: true}. The XR reconciler returns {Requeue: true} while waiting + // for composed resources to become ready, and we don't want to back off as + // far as 60 seconds. Instead we cap the XR reconciler at 30 seconds. + ko.RateLimiter = workqueue.NewItemExponentialFailureRateLimiter(1*time.Second, 30*time.Second) ko.Reconciler = ratelimiter.NewReconciler(composite.ControllerName(d.GetName()), errors.WithSilentRequeueOnConflict(cr), r.options.GlobalRateLimiter) xrGVK := d.GetCompositeGroupVersionKind() - - u := &kunstructured.Unstructured{} - u.SetGroupVersionKind(xrGVK) - name := composite.ControllerName(d.GetName()) - var ca cache.Cache - watches := []controller.Watch{ - controller.For(u, &handler.EnqueueRequestForObject{}), - // enqueue composites whenever a matching CompositionRevision is created - controller.TriggeredBy(source.Kind(r.mgr.GetCache(), &v1.CompositionRevision{}), handler.Funcs{ - CreateFunc: composite.EnqueueForCompositionRevisionFunc(ck, r.mgr.GetCache().List, r.log), - }), - } + + // TODO(negz): Update CompositeReconcilerOptions to produce + // ControllerOptions instead? It bothers me that this is the only feature + // flagged block outside that method. 
+ co := []engine.ControllerOption{engine.WithRuntimeOptions(ko)} if r.options.Features.Enabled(features.EnableAlphaRealtimeCompositions) { - // enqueue XRs that when a relevant MR is updated - watches = append(watches, controller.TriggeredBy(&r.xrInformers, handler.Funcs{ - UpdateFunc: func(ctx context.Context, ev runtimeevent.UpdateEvent, q workqueue.RateLimitingInterface) { - enqueueXRsForMR(ca, xrGVK, log)(ctx, ev, q) - }, - })) + // If realtime composition is enabled we'll start watches dynamically, + // so we want to garbage collect watches for composed resource kinds + // that aren't used anymore. + gc := watch.NewGarbageCollector(name, resource.CompositeKind(xrGVK), r.engine, watch.WithLogger(log)) + co = append(co, engine.WithWatchGarbageCollector(gc)) } - c, err := r.composite.Create(name, ko, watches...) - if err != nil { + if err := r.engine.Start(name, co...); err != nil { log.Debug(errStartController, "error", err) err = errors.Wrap(err, errStartController) r.record.Event(d, event.Warning(reasonEstablishXR, err)) return reconcile.Result{}, err } - if r.options.Features.Enabled(features.EnableAlphaRealtimeCompositions) { - ca = c.GetCache() - if err := ca.IndexField(ctx, u, compositeResourceRefGVKsIndex, IndexCompositeResourceRefGVKs); err != nil { - log.Debug(errAddIndex, "error", err) - // Nothing we can do. At worst, we won't have realtime updates. - } - if err := ca.IndexField(ctx, u, compositeResourcesRefsIndex, IndexCompositeResourcesRefs); err != nil { - log.Debug(errAddIndex, "error", err) - // Nothing we can do. At worst, we won't have realtime updates. - } - } - - if err := c.Start(context.Background()); err != nil { //nolint:contextcheck // the controller actually runs in the background. - log.Debug(errStartController, "error", err) - err = errors.Wrap(err, errStartController) + // This must be *unstructured.Unstructured, not *composite.Unstructured. 
+ // controller-runtime doesn't support watching types that satisfy the + // runtime.Unstructured interface - only *unstructured.Unstructured. + xr := &kunstructured.Unstructured{} + xr.SetGroupVersionKind(xrGVK) + + crh := EnqueueForCompositionRevision(resource.CompositeKind(xrGVK), r.engine.GetClient(), log) + if err := r.engine.StartWatches(name, + engine.WatchFor(xr, engine.WatchTypeCompositeResource, &handler.EnqueueRequestForObject{}), + engine.WatchFor(&v1.CompositionRevision{}, engine.WatchTypeCompositionRevision, crh), + ); err != nil { + log.Debug(errStartWatches, "error", err) + err = errors.Wrap(err, errStartWatches) r.record.Event(d, event.Warning(reasonEstablishXR, err)) return reconcile.Result{}, err } - log.Debug("(Re)started composite resource controller") - if r.options.Features.Enabled(features.EnableAlphaRealtimeCompositions) { - r.xrInformers.RegisterComposite(xrGVK, ca) - } + log.Debug("Started composite resource controller") d.Status.Controllers.CompositeResourceTypeRef = v1.TypeReferenceTo(d.GetCompositeGroupVersionKind()) d.Status.SetConditions(v1.WatchingComposite()) @@ -527,33 +529,33 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco // CompositeReconcilerOptions builds the options for a composite resource // reconciler. The options vary based on the supplied feature flags. -func CompositeReconcilerOptions(co apiextensionscontroller.Options, d *v1.CompositeResourceDefinition, c client.Client, l logging.Logger, e event.Recorder) []composite.ReconcilerOption { +func (r *Reconciler) CompositeReconcilerOptions(ctx context.Context, d *v1.CompositeResourceDefinition) []composite.ReconcilerOption { // The default set of reconciler options when no feature flags are enabled. 
o := []composite.ReconcilerOption{ - composite.WithConnectionPublishers(composite.NewAPIFilteredSecretPublisher(c, d.GetConnectionSecretKeys())), + composite.WithConnectionPublishers(composite.NewAPIFilteredSecretPublisher(r.engine.GetClient(), d.GetConnectionSecretKeys())), composite.WithCompositionSelector(composite.NewCompositionSelectorChain( - composite.NewEnforcedCompositionSelector(*d, e), - composite.NewAPIDefaultCompositionSelector(c, *meta.ReferenceTo(d, v1.CompositeResourceDefinitionGroupVersionKind), e), - composite.NewAPILabelSelectorResolver(c), + composite.NewEnforcedCompositionSelector(*d, r.record), + composite.NewAPIDefaultCompositionSelector(r.engine.GetClient(), *meta.ReferenceTo(d, v1.CompositeResourceDefinitionGroupVersionKind), r.record), + composite.NewAPILabelSelectorResolver(r.engine.GetClient()), )), - composite.WithLogger(l.WithValues("controller", composite.ControllerName(d.GetName()))), - composite.WithRecorder(e.WithAnnotations("controller", composite.ControllerName(d.GetName()))), - composite.WithPollInterval(co.PollInterval), + composite.WithLogger(r.log.WithValues("controller", composite.ControllerName(d.GetName()))), + composite.WithRecorder(r.record.WithAnnotations("controller", composite.ControllerName(d.GetName()))), + composite.WithPollInterval(r.options.PollInterval), } // We only want to enable Composition environment support if the relevant // feature flag is enabled. Otherwise we will default to noop selector and // fetcher that will always return nil. All environment features are // subsequently skipped if the environment is nil. 
- if co.Features.Enabled(features.EnableAlphaEnvironmentConfigs) { + if r.options.Features.Enabled(features.EnableAlphaEnvironmentConfigs) { o = append(o, - composite.WithEnvironmentSelector(composite.NewAPIEnvironmentSelector(c)), - composite.WithEnvironmentFetcher(composite.NewAPIEnvironmentFetcher(c))) + composite.WithEnvironmentSelector(composite.NewAPIEnvironmentSelector(r.engine.GetClient())), + composite.WithEnvironmentFetcher(composite.NewAPIEnvironmentFetcher(r.engine.GetClient()))) } // If external secret stores aren't enabled we just fetch connection details // from Kubernetes secrets. - var fetcher managed.ConnectionDetailsFetcher = composite.NewSecretConnectionDetailsFetcher(c) + var fetcher managed.ConnectionDetailsFetcher = composite.NewSecretConnectionDetailsFetcher(r.engine.GetClient()) // We only want to enable ExternalSecretStore support if the relevant // feature flag is enabled. Otherwise, we start the XR reconcilers with @@ -561,68 +563,79 @@ func CompositeReconcilerOptions(co apiextensionscontroller.Options, d *v1.Compos // We also add a new Configurator for ExternalSecretStore which basically // reflects PublishConnectionDetailsWithStoreConfigRef in Composition to // the composite resource. 
- if co.Features.Enabled(features.EnableAlphaExternalSecretStores) { + if r.options.Features.Enabled(features.EnableAlphaExternalSecretStores) { pc := []managed.ConnectionPublisher{ - composite.NewAPIFilteredSecretPublisher(c, d.GetConnectionSecretKeys()), - composite.NewSecretStoreConnectionPublisher(connection.NewDetailsManager(c, v1alpha1.StoreConfigGroupVersionKind, - connection.WithTLSConfig(co.ESSOptions.TLSConfig)), d.GetConnectionSecretKeys()), + composite.NewAPIFilteredSecretPublisher(r.engine.GetClient(), d.GetConnectionSecretKeys()), + composite.NewSecretStoreConnectionPublisher(connection.NewDetailsManager(r.engine.GetClient(), v1alpha1.StoreConfigGroupVersionKind, + connection.WithTLSConfig(r.options.ESSOptions.TLSConfig)), d.GetConnectionSecretKeys()), } // If external secret stores are enabled we need to support fetching // connection details from both secrets and external stores. fetcher = composite.ConnectionDetailsFetcherChain{ - composite.NewSecretConnectionDetailsFetcher(c), - connection.NewDetailsManager(c, v1alpha1.StoreConfigGroupVersionKind, connection.WithTLSConfig(co.ESSOptions.TLSConfig)), + composite.NewSecretConnectionDetailsFetcher(r.engine.GetClient()), + connection.NewDetailsManager(r.engine.GetClient(), v1alpha1.StoreConfigGroupVersionKind, connection.WithTLSConfig(r.options.ESSOptions.TLSConfig)), } cc := composite.NewConfiguratorChain( - composite.NewAPINamingConfigurator(c), - composite.NewAPIConfigurator(c), - composite.NewSecretStoreConnectionDetailsConfigurator(c), + composite.NewAPINamingConfigurator(r.engine.GetClient()), + composite.NewAPIConfigurator(r.engine.GetClient()), + composite.NewSecretStoreConnectionDetailsConfigurator(r.engine.GetClient()), ) o = append(o, composite.WithConnectionPublishers(pc...), - composite.WithConfigurator(cc), - composite.WithComposer(composite.NewPTComposer(c, composite.WithComposedConnectionDetailsFetcher(fetcher)))) + composite.WithConfigurator(cc)) } - // If Composition Functions are 
enabled we use two different Composer - // implementations. One supports P&T (aka 'Resources mode') and the other - // Functions (aka 'Pipeline mode'). - if co.Features.Enabled(features.EnableBetaCompositionFunctions) { - ptc := composite.NewPTComposer(c, composite.WithComposedConnectionDetailsFetcher(fetcher)) + // This composer is used for mode: Resources Compositions (the default). + ptc := composite.NewPTComposer(r.engine.GetClient(), composite.WithComposedConnectionDetailsFetcher(fetcher)) - fcopts := []composite.FunctionComposerOption{ - composite.WithComposedResourceObserver(composite.NewExistingComposedResourceObserver(c, fetcher)), - composite.WithCompositeConnectionDetailsFetcher(fetcher), - } + // Wrap the PackagedFunctionRunner setup in main with support for loading + // extra resources to satisfy function requirements. + runner := composite.NewFetchingFunctionRunner(r.options.FunctionRunner, composite.NewExistingExtraResourcesFetcher(r.engine.GetClient())) + + // This composer is used for mode: Pipeline Compositions. + fc := composite.NewFunctionComposer(r.engine.GetClient(), runner, + composite.WithComposedResourceObserver(composite.NewExistingComposedResourceObserver(r.engine.GetClient(), fetcher)), + composite.WithCompositeConnectionDetailsFetcher(fetcher), + ) - if co.Features.Enabled(features.EnableBetaCompositionFunctionsExtraResources) { - fcopts = append(fcopts, composite.WithExtraResourcesFetcher(composite.NewExistingExtraResourcesFetcher(c))) + // We use two different Composer implementations. One supports P&T (aka + // 'Resources mode') and the other Functions (aka 'Pipeline mode'). + o = append(o, composite.WithComposer(composite.ComposerSelectorFn(func(cm *v1.CompositionMode) composite.Composer { + // Resources mode is the implicit default. 
+ m := v1.CompositionModeResources + if cm != nil { + m = *cm + } + switch m { + case v1.CompositionModeResources: + return ptc + case v1.CompositionModePipeline: + return fc + default: + // This shouldn't be possible, but just in case return the + // default Composer. + return ptc } + }))) - fc := composite.NewFunctionComposer(c, co.FunctionRunner, fcopts...) + // If realtime compositions are enabled we pass the ControllerEngine to the + // XR reconciler so that it can start watches for composed resources. + if r.options.Features.Enabled(features.EnableAlphaRealtimeCompositions) { + gvk := d.GetCompositeGroupVersionKind() + u := &kunstructured.Unstructured{} + u.SetAPIVersion(gvk.GroupVersion().String()) + u.SetKind(gvk.Kind) + + // Add an index to the controller engine's client. + if err := r.engine.GetFieldIndexer().IndexField(ctx, u, compositeResourcesRefsIndex, IndexCompositeResourcesRefs); err != nil { + r.log.Debug(errAddIndex, "error", err) + } - // Note that if external secret stores are enabled this will supersede - // the WithComposer option specified in that block. - o = append(o, composite.WithComposer(composite.ComposerSelectorFn(func(cm *v1.CompositionMode) composite.Composer { - // Resources mode is the implicit default. - m := v1.CompositionModeResources - if cm != nil { - m = *cm - } - switch m { - case v1.CompositionModeResources: - return ptc - case v1.CompositionModePipeline: - return fc - default: - // This shouldn't be possible, but just in case return the - // default Composer. 
- return ptc - } - }))) + h := EnqueueCompositeResources(resource.CompositeKind(d.GetCompositeGroupVersionKind()), r.engine.GetClient(), r.log) + o = append(o, composite.WithWatchStarter(composite.ControllerName(d.GetName()), h, r.engine)) } return o diff --git a/internal/controller/apiextensions/definition/reconciler_test.go b/internal/controller/apiextensions/definition/reconciler_test.go index a65f7f664..c92ca9b62 100644 --- a/internal/controller/apiextensions/definition/reconciler_test.go +++ b/internal/controller/apiextensions/definition/reconciler_test.go @@ -20,57 +20,70 @@ import ( "context" "testing" - "github.com/go-logr/logr" "github.com/google/go-cmp/cmp" extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/rest" - "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" - ctrlconfig "sigs.k8s.io/controller-runtime/pkg/config" - kcontroller "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "github.com/crossplane/crossplane-runtime/pkg/controller" "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane-runtime/pkg/resource" - "github.com/crossplane/crossplane-runtime/pkg/resource/fake" "github.com/crossplane/crossplane-runtime/pkg/test" v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" - apiextensionscontroller "github.com/crossplane/crossplane/internal/controller/apiextensions/controller" - "github.com/crossplane/crossplane/internal/features" + "github.com/crossplane/crossplane/internal/engine" +) + +var ( + _ ControllerEngine = &MockEngine{} + _ ControllerEngine = 
&NopEngine{} ) type MockEngine struct { - ControllerEngine - MockCreate func(name string, o kcontroller.Options, w ...controller.Watch) (controller.NamedController, error) - MockStart func(name string, o kcontroller.Options, w ...controller.Watch) error - MockStop func(name string) - MockErr func(name string) error + MockStart func(name string, o ...engine.ControllerOption) error + MockStop func(ctx context.Context, name string) error + MockIsRunning func(name string) bool + MockGetWatches func(name string) ([]engine.WatchID, error) + MockStartWatches func(name string, ws ...engine.Watch) error + MockStopWatches func(ctx context.Context, name string, ws ...engine.WatchID) (int, error) + MockGetClient func() client.Client + MockGetFieldIndexer func() client.FieldIndexer +} + +func (m *MockEngine) IsRunning(name string) bool { + return m.MockIsRunning(name) +} + +func (m *MockEngine) Start(name string, o ...engine.ControllerOption) error { + return m.MockStart(name, o...) +} + +func (m *MockEngine) Stop(ctx context.Context, name string) error { + return m.MockStop(ctx, name) +} + +func (m *MockEngine) GetWatches(name string) ([]engine.WatchID, error) { + return m.MockGetWatches(name) } -func (m *MockEngine) Create(name string, o kcontroller.Options, w ...controller.Watch) (controller.NamedController, error) { - return m.MockCreate(name, o, w...) +func (m *MockEngine) StartWatches(name string, ws ...engine.Watch) error { + return m.MockStartWatches(name, ws...) } -func (m *MockEngine) Start(name string, o kcontroller.Options, w ...controller.Watch) error { - return m.MockStart(name, o, w...) +func (m *MockEngine) StopWatches(ctx context.Context, name string, ws ...engine.WatchID) (int, error) { + return m.MockStopWatches(ctx, name, ws...) 
} -func (m *MockEngine) Stop(name string) { - m.MockStop(name) +func (m *MockEngine) GetClient() client.Client { + return m.MockGetClient() } -func (m *MockEngine) Err(name string) error { - return m.MockErr(name) +func (m *MockEngine) GetFieldIndexer() client.FieldIndexer { + return m.MockGetFieldIndexer() } func TestReconcile(t *testing.T) { @@ -80,7 +93,7 @@ func TestReconcile(t *testing.T) { ctrlr := true type args struct { - mgr manager.Manager + ca resource.ClientApplicator opts []ReconcilerOption } type want struct { @@ -96,13 +109,10 @@ func TestReconcile(t *testing.T) { "CompositeResourceDefinitionNotFound": { reason: "We should not return an error if the CompositeResourceDefinition was not found.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{}, "")), - }, - }), + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{}, "")), + }, }, }, want: want{ @@ -112,13 +122,10 @@ func TestReconcile(t *testing.T) { "GetCompositeResourceDefinitionError": { reason: "We should return any other error encountered while getting a CompositeResourceDefinition.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(errBoom), - }, - }), + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(errBoom), + }, }, }, want: want{ @@ -128,13 +135,12 @@ func TestReconcile(t *testing.T) { "RenderCustomResourceDefinitionError": { reason: "We should return any error we encounter rendering a CRD.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + }, + }, opts: []ReconcilerOption{ - 
WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return nil, errBoom })), @@ -147,18 +153,17 @@ func TestReconcile(t *testing.T) { "SetTerminatingConditionError": { reason: "We should return any error we encounter while setting the terminating status condition.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + d := o.(*v1.CompositeResourceDefinition) + d.SetDeletionTimestamp(&now) + return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(errBoom), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - d := o.(*v1.CompositeResourceDefinition) - d.SetDeletionTimestamp(&now) - return nil - }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(errBoom), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -171,24 +176,23 @@ func TestReconcile(t *testing.T) { "GetCustomResourceDefinitionError": { reason: "We should return any error we encounter getting a CRD.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + return errBoom + } + return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: 
test.NewMockGetFn(nil, func(o client.Object) error { - switch v := o.(type) { - case *v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{} - d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - return errBoom - } - return nil - }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -198,24 +202,54 @@ func TestReconcile(t *testing.T) { err: errors.Wrap(errBoom, errGetCRD), }, }, - "RemoveFinalizerError": { - reason: "We should return any error we encounter while removing a finalizer.", + "CustomResourceDefinitionNotFoundStopControllerError": { + reason: "We should return any error we encounter while stopping our controller (just in case) when the CRD doesn't exist.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + if v, ok := o.(*v1.CompositeResourceDefinition); ok { + d := v1.CompositeResourceDefinition{} + d.SetDeletionTimestamp(&now) + *v = d + } + return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - if v, ok := o.(*v1.CompositeResourceDefinition); ok { - d := v1.CompositeResourceDefinition{} - d.SetDeletionTimestamp(&now) - *v = d - } - return nil - }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { + return &extv1.CustomResourceDefinition{}, nil + })), + WithControllerEngine(&MockEngine{ + MockStop: func(_ context.Context, _ string) error { + return errBoom }, }), + }, + }, + want: want{ + err: 
errors.Wrap(errBoom, errStopController), + }, + }, + "RemoveFinalizerError": { + reason: "We should return any error we encounter while removing a finalizer.", + args: args{ + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + if v, ok := o.(*v1.CompositeResourceDefinition); ok { + d := v1.CompositeResourceDefinition{} + d.SetDeletionTimestamp(&now) + *v = d + } + return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -231,21 +265,20 @@ func TestReconcile(t *testing.T) { "SuccessfulDelete": { reason: "We should not requeue when deleted if we successfully cleaned up our CRD and removed our finalizer.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + if v, ok := o.(*v1.CompositeResourceDefinition); ok { + d := v1.CompositeResourceDefinition{} + d.SetDeletionTimestamp(&now) + *v = d + } + return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - if v, ok := o.(*v1.CompositeResourceDefinition); ok { - d := v1.CompositeResourceDefinition{} - d.SetDeletionTimestamp(&now) - *v = d - } - return nil - }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -261,29 +294,28 @@ func TestReconcile(t *testing.T) { "DeleteAllCustomResourcesError": { reason: "We should return any error we encounter while 
deleting all defined resources.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetUID(owner) + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + crd := extv1.CustomResourceDefinition{} + crd.SetCreationTimestamp(now) + crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) + *v = crd + } + return nil + }), + MockDeleteAllOf: test.NewMockDeleteAllOfFn(errBoom), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - switch v := o.(type) { - case *v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{} - d.SetUID(owner) - d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - crd := extv1.CustomResourceDefinition{} - crd.SetCreationTimestamp(now) - crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) - *v = crd - } - return nil - }), - MockDeleteAllOf: test.NewMockDeleteAllOfFn(errBoom), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -296,30 +328,29 @@ func TestReconcile(t *testing.T) { "ListCustomResourcesError": { reason: "We should return any error we encounter while listing all defined resources.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := 
v1.CompositeResourceDefinition{} + d.SetUID(owner) + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + crd := extv1.CustomResourceDefinition{} + crd.SetCreationTimestamp(now) + crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) + *v = crd + } + return nil + }), + MockDeleteAllOf: test.NewMockDeleteAllOfFn(nil), + MockList: test.NewMockListFn(errBoom), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - switch v := o.(type) { - case *v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{} - d.SetUID(owner) - d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - crd := extv1.CustomResourceDefinition{} - crd.SetCreationTimestamp(now) - crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) - *v = crd - } - return nil - }), - MockDeleteAllOf: test.NewMockDeleteAllOfFn(nil), - MockList: test.NewMockListFn(errBoom), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -332,36 +363,35 @@ func TestReconcile(t *testing.T) { "WaitForDeleteAllOf": { reason: "We should record the pending deletion of defined resources.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetUID(owner) + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + crd := extv1.CustomResourceDefinition{} + crd.SetCreationTimestamp(now) + 
crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) + *v = crd + } + return nil + }), + MockDeleteAllOf: test.NewMockDeleteAllOfFn(nil), + MockList: test.NewMockListFn(nil, func(o client.ObjectList) error { + v := o.(*unstructured.UnstructuredList) + *v = unstructured.UnstructuredList{ + Items: []unstructured.Unstructured{{}, {}}, + } + return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - switch v := o.(type) { - case *v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{} - d.SetUID(owner) - d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - crd := extv1.CustomResourceDefinition{} - crd.SetCreationTimestamp(now) - crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) - *v = crd - } - return nil - }), - MockDeleteAllOf: test.NewMockDeleteAllOfFn(nil), - MockList: test.NewMockListFn(nil, func(o client.ObjectList) error { - v := o.(*unstructured.UnstructuredList) - *v = unstructured.UnstructuredList{ - Items: []unstructured.Unstructured{{}, {}}, - } - return nil - }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -371,34 +401,73 @@ func TestReconcile(t *testing.T) { r: reconcile.Result{Requeue: true}, }, }, - "DeleteCustomResourceDefinitionError": { - reason: "We should return any error we encounter while deleting the CRD we created.", + "StopControllerError": { + reason: "We should return any error we encounter while stopping our controller.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o 
client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetUID(owner) + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + crd := extv1.CustomResourceDefinition{} + crd.SetCreationTimestamp(now) + crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) + *v = crd + } + return nil + }), + MockDeleteAllOf: test.NewMockDeleteAllOfFn(nil), + MockList: test.NewMockListFn(nil), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - switch v := o.(type) { - case *v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{} - d.SetUID(owner) - d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - crd := extv1.CustomResourceDefinition{} - crd.SetCreationTimestamp(now) - crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) - *v = crd - } - return nil - }), - MockDeleteAllOf: test.NewMockDeleteAllOfFn(nil), - MockList: test.NewMockListFn(nil), - MockDelete: test.NewMockDeleteFn(errBoom), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { + return &extv1.CustomResourceDefinition{}, nil + })), + WithControllerEngine(&MockEngine{ + MockStop: func(_ context.Context, _ string) error { + return errBoom }, }), + }, + }, + want: want{ + err: errors.Wrap(errBoom, errStopController), + }, + }, + "DeleteCustomResourceDefinitionError": { + reason: "We should return any error we encounter while deleting the CRD we created.", + args: args{ + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case 
*v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetUID(owner) + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + crd := extv1.CustomResourceDefinition{} + crd.SetCreationTimestamp(now) + crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) + *v = crd + } + return nil + }), + MockDeleteAllOf: test.NewMockDeleteAllOfFn(nil), + MockList: test.NewMockListFn(nil), + MockDelete: test.NewMockDeleteFn(errBoom), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -411,42 +480,41 @@ func TestReconcile(t *testing.T) { "SuccessfulCleanup": { reason: "We should requeue to remove our finalizer once we've cleaned up our defined resources and CRD.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - switch v := o.(type) { - case *v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{} - d.SetUID(owner) - d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - crd := extv1.CustomResourceDefinition{} - crd.SetCreationTimestamp(now) - crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) - *v = crd - } - return nil - }), - MockDeleteAllOf: test.NewMockDeleteAllOfFn(nil), - MockList: test.NewMockListFn(nil), - MockDelete: test.NewMockDeleteFn(nil), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(got client.Object) error { - want := &v1.CompositeResourceDefinition{} - want.SetUID(owner) - want.SetDeletionTimestamp(&now) - want.Status.SetConditions(v1.TerminatingComposite()) - - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("MockStatusUpdate: -want, 
+got:\n%s\n", diff) - } + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetUID(owner) + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + crd := extv1.CustomResourceDefinition{} + crd.SetCreationTimestamp(now) + crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) + *v = crd + } + return nil + }), + MockDeleteAllOf: test.NewMockDeleteAllOfFn(nil), + MockList: test.NewMockListFn(nil), + MockDelete: test.NewMockDeleteFn(nil), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(got client.Object) error { + want := &v1.CompositeResourceDefinition{} + want.SetUID(owner) + want.SetDeletionTimestamp(&now) + want.Status.SetConditions(v1.TerminatingComposite()) + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("MockStatusUpdate: -want, +got:\n%s\n", diff) + } - return nil - }), - }, - }), + return nil + }), + }, + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -459,13 +527,12 @@ func TestReconcile(t *testing.T) { "AddFinalizerError": { reason: "We should return any error we encounter while adding a finalizer.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -481,16 +548,15 @@ func TestReconcile(t *testing.T) { "ApplyCustomResourceDefinitionError": { reason: "We 
should return any error we encounter while applying our CRD.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - }, - Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { - return errBoom - }), + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return errBoom }), + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -506,16 +572,15 @@ func TestReconcile(t *testing.T) { "CustomResourceDefinitionIsNotEstablished": { reason: "We should requeue if we're waiting for a newly created CRD to become established.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - }, - Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { - return nil - }), + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil }), + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -528,32 +593,84 @@ func TestReconcile(t *testing.T) { r: reconcile.Result{Requeue: true}, }, }, - "CreateControllerError": { - reason: "We should return any error we encounter while starting our controller.", + "VersionChangedStopControllerError": { + reason: "We should 
return any error we encounter while stopping our controller because the XRD's referencable version changed.", args: args{ - mgr: &mockManager{ - GetCacheFn: func() cache.Cache { - return &mockCache{ - ListFn: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { return nil }, - } - }, - GetClientFn: func() client.Client { - return &test.MockClient{MockList: test.NewMockListFn(nil)} - }, - GetSchemeFn: runtime.NewScheme, - GetRESTMapperFn: func() meta.RESTMapper { - return meta.NewDefaultRESTMapper([]schema.GroupVersion{v1.SchemeGroupVersion}) + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + xrd := &v1.CompositeResourceDefinition{ + Spec: v1.CompositeResourceDefinitionSpec{ + Group: "example.org", + Names: extv1.CustomResourceDefinitionNames{ + Kind: "XR", + }, + Versions: []v1.CompositeResourceDefinitionVersion{ + { + Name: "v2", + Referenceable: true, + }, + { + Name: "v1", + }, + }, + }, + Status: v1.CompositeResourceDefinitionStatus{ + Controllers: v1.CompositeResourceDefinitionControllerStatus{ + CompositeResourceTypeRef: v1.TypeReference{ + APIVersion: "example.org/v1", + Kind: "XR", + }, + }, + }, + } + + *obj.(*v1.CompositeResourceDefinition) = *xrd + return nil + }), }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil + }), }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), + WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { + return &extv1.CustomResourceDefinition{ + Status: extv1.CustomResourceDefinitionStatus{ + Conditions: []extv1.CustomResourceDefinitionCondition{ + {Type: extv1.Established, Status: extv1.ConditionTrue}, + }, + }, + }, nil + })), + WithFinalizer(resource.FinalizerFns{AddFinalizerFn: func(_ context.Context, _ 
resource.Object) error { + return nil + }}), + WithControllerEngine(&MockEngine{ + MockIsRunning: func(_ string) bool { return false }, + MockStop: func(_ context.Context, _ string) error { + return errBoom }, - Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { - return nil - }), }), + }, + }, + want: want{ + r: reconcile.Result{}, + err: errors.Wrap(errBoom, errStopController), + }, + }, + "StartControllerError": { + reason: "We should return any error we encounter while starting our controller.", + args: args{ + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil + }), + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{ Status: extv1.CustomResourceDefinitionStatus{ @@ -567,10 +684,11 @@ func TestReconcile(t *testing.T) { return nil }}), WithControllerEngine(&MockEngine{ - MockErr: func(_ string) error { return nil }, - MockCreate: func(_ string, _ kcontroller.Options, _ ...controller.Watch) (controller.NamedController, error) { - return nil, errBoom + MockIsRunning: func(_ string) bool { return false }, + MockStart: func(_ string, _ ...engine.ControllerOption) error { + return errBoom }, + MockGetClient: func() client.Client { return test.NewMockClient() }, }), }, }, @@ -579,41 +697,68 @@ func TestReconcile(t *testing.T) { err: errors.Wrap(errBoom, errStartController), }, }, - "SuccessfulStart": { - reason: "We should return without requeueing if we successfully ensured our CRD exists and controller is started.", + "StartWatchesError": { + reason: "We should return any error we encounter while starting watches.", args: args{ - mgr: &mockManager{ - GetCacheFn: func() cache.Cache { - return &mockCache{ - ListFn: 
func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { return nil }, - } - }, - GetClientFn: func() client.Client { - return &test.MockClient{MockList: test.NewMockListFn(nil)} - }, - GetSchemeFn: runtime.NewScheme, - GetRESTMapperFn: func() meta.RESTMapper { - return meta.NewDefaultRESTMapper([]schema.GroupVersion{v1.SchemeGroupVersion}) + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil + }), }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { - want := &v1.CompositeResourceDefinition{} - want.Status.SetConditions(v1.WatchingComposite()) - - if diff := cmp.Diff(want, o); diff != "" { - t.Errorf("-want, +got:\n%s", diff) - } - return nil - }), + WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { + return &extv1.CustomResourceDefinition{ + Status: extv1.CustomResourceDefinitionStatus{ + Conditions: []extv1.CustomResourceDefinitionCondition{ + {Type: extv1.Established, Status: extv1.ConditionTrue}, + }, + }, + }, nil + })), + WithFinalizer(resource.FinalizerFns{AddFinalizerFn: func(_ context.Context, _ resource.Object) error { + return nil + }}), + WithControllerEngine(&MockEngine{ + MockIsRunning: func(_ string) bool { return false }, + MockStart: func(_ string, _ ...engine.ControllerOption) error { + return nil + }, + MockStartWatches: func(_ string, _ ...engine.Watch) error { + return errBoom }, - Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + MockGetClient: func() client.Client { return test.NewMockClient() }, + }), + }, + }, + want: want{ + r: reconcile.Result{}, + err: 
errors.Wrap(errBoom, errStartWatches), + }, + }, + "SuccessfulStart": { + reason: "We should return without requeueing if we successfully ensured our CRD exists and controller is started.", + args: args{ + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { + want := &v1.CompositeResourceDefinition{} + want.Status.SetConditions(v1.WatchingComposite()) + + if diff := cmp.Diff(want, o); diff != "" { + t.Errorf("-want, +got:\n%s", diff) + } return nil }), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil }), + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{ Status: extv1.CustomResourceDefinitionStatus{ @@ -627,22 +772,10 @@ func TestReconcile(t *testing.T) { return nil }}), WithControllerEngine(&MockEngine{ - MockErr: func(name string) error { return errBoom }, // This error should only be logged. 
- MockCreate: func(_ string, _ kcontroller.Options, _ ...controller.Watch) (controller.NamedController, error) { - return mockNamedController{ - MockStart: func(ctx context.Context) error { return nil }, - MockGetCache: func() cache.Cache { - return &mockCache{ - IndexFieldFn: func(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { - return nil - }, - WaitForCacheSyncFn: func(ctx context.Context) bool { - return true - }, - } - }, - }, nil - }, + MockIsRunning: func(_ string) bool { return false }, + MockStart: func(_ string, _ ...engine.ControllerOption) error { return nil }, + MockStartWatches: func(_ string, _ ...engine.Watch) error { return nil }, + MockGetClient: func() client.Client { return test.NewMockClient() }, }), }, }, @@ -653,51 +786,83 @@ func TestReconcile(t *testing.T) { "SuccessfulUpdateControllerVersion": { reason: "We should return without requeueing if we successfully ensured our CRD exists, the old controller stopped, and the new one started.", args: args{ - mgr: &mockManager{ - GetCacheFn: func() cache.Cache { - return &mockCache{ - ListFn: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { return nil }, - } - }, - GetClientFn: func() client.Client { - return &test.MockClient{MockList: test.NewMockListFn(nil)} - }, - GetSchemeFn: runtime.NewScheme, - GetRESTMapperFn: func() meta.RESTMapper { - return meta.NewDefaultRESTMapper([]schema.GroupVersion{v1.SchemeGroupVersion}) + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + d := obj.(*v1.CompositeResourceDefinition) + d.Spec.Versions = []v1.CompositeResourceDefinitionVersion{ + {Name: "old", Referenceable: false}, + {Name: "new", Referenceable: true}, + } + d.Status.Controllers.CompositeResourceTypeRef = v1.TypeReference{APIVersion: "old"} + return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { 
+ want := &v1.CompositeResourceDefinition{} + want.Spec.Versions = []v1.CompositeResourceDefinitionVersion{ + {Name: "old", Referenceable: false}, + {Name: "new", Referenceable: true}, + } + want.Status.Controllers.CompositeResourceTypeRef = v1.TypeReference{APIVersion: "new"} + want.Status.SetConditions(v1.WatchingComposite()) + + if diff := cmp.Diff(want, o); diff != "" { + t.Errorf("-want, +got:\n%s", diff) + } + return nil + }), }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil + }), }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - d := obj.(*v1.CompositeResourceDefinition) - d.Spec.Versions = []v1.CompositeResourceDefinitionVersion{ - {Name: "old", Referenceable: false}, - {Name: "new", Referenceable: true}, - } - d.Status.Controllers.CompositeResourceTypeRef = v1.TypeReference{APIVersion: "old"} - return nil - }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { - want := &v1.CompositeResourceDefinition{} - want.Spec.Versions = []v1.CompositeResourceDefinitionVersion{ - {Name: "old", Referenceable: false}, - {Name: "new", Referenceable: true}, - } - want.Status.Controllers.CompositeResourceTypeRef = v1.TypeReference{APIVersion: "new"} - want.Status.SetConditions(v1.WatchingComposite()) - - if diff := cmp.Diff(want, o); diff != "" { - t.Errorf("-want, +got:\n%s", diff) - } - return nil - }), - }, - Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { + return &extv1.CustomResourceDefinition{ + Status: extv1.CustomResourceDefinitionStatus{ + Conditions: []extv1.CustomResourceDefinitionCondition{ + {Type: extv1.Established, Status: extv1.ConditionTrue}, + }, + }, + }, 
nil + })), + WithFinalizer(resource.FinalizerFns{AddFinalizerFn: func(_ context.Context, _ resource.Object) error { + return nil + }}), + WithControllerEngine(&MockEngine{ + MockStart: func(_ string, _ ...engine.ControllerOption) error { return nil }, + MockStop: func(_ context.Context, _ string) error { return nil }, + MockIsRunning: func(_ string) bool { return false }, + MockStartWatches: func(_ string, _ ...engine.Watch) error { return nil }, + MockGetClient: func() client.Client { return test.NewMockClient() }, + }), + }, + }, + want: want{ + r: reconcile.Result{Requeue: false}, + }, + }, + "NotRestartingWithoutVersionChange": { + reason: "We should return without requeueing if we successfully ensured our CRD exists and controller is started.", + args: args{ + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { + want := &v1.CompositeResourceDefinition{} + want.Status.SetConditions(v1.WatchingComposite()) + + if diff := cmp.Diff(want, o); diff != "" { + t.Errorf("-want, +got:\n%s", diff) + } return nil }), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil }), + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{ Status: extv1.CustomResourceDefinitionStatus{ @@ -711,23 +876,11 @@ func TestReconcile(t *testing.T) { return nil }}), WithControllerEngine(&MockEngine{ - MockErr: func(name string) error { return nil }, - MockCreate: func(_ string, _ kcontroller.Options, _ ...controller.Watch) (controller.NamedController, error) { - return mockNamedController{ - MockStart: func(ctx context.Context) error { return nil }, - MockGetCache: func() cache.Cache { - return &mockCache{ - IndexFieldFn: func(ctx context.Context, obj client.Object, 
field string, extractValue client.IndexerFunc) error { - return nil - }, - WaitForCacheSyncFn: func(ctx context.Context) bool { - return true - }, - } - }, - }, nil + MockIsRunning: func(_ string) bool { return true }, + MockStart: func(_ string, _ ...engine.ControllerOption) error { + t.Errorf("MockStart should not be called") + return nil }, - MockStop: func(_ string) {}, }), }, }, @@ -737,105 +890,17 @@ func TestReconcile(t *testing.T) { }, } - // Run every test with and without the realtime compositions feature. - rtc := apiextensionscontroller.Options{Options: controller.DefaultOptions()} - rtc.Features.Enable(features.EnableAlphaRealtimeCompositions) - - type mode struct { - name string - extra []ReconcilerOption - } - for _, m := range []mode{ - {name: "Default"}, - {name: string(features.EnableAlphaRealtimeCompositions), extra: []ReconcilerOption{WithOptions(rtc)}}, - } { - t.Run(m.name, func(t *testing.T) { - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - r := NewReconciler(tc.args.mgr, append(tc.args.opts, m.extra...)...) - got, err := r.Reconcile(context.Background(), reconcile.Request{}) + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + r := NewReconciler(tc.args.ca, tc.args.opts...) 
+ got, err := r.Reconcile(context.Background(), reconcile.Request{}) - if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { - t.Errorf("\n%s\nr.Reconcile(...): -want error, +got error:\n%s", tc.reason, diff) - } - if diff := cmp.Diff(tc.want.r, got, test.EquateErrors()); diff != "" { - t.Errorf("\n%s\nr.Reconcile(...): -want, +got:\n%s", tc.reason, diff) - } - }) + if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { + t.Errorf("\n%s\nr.Reconcile(...): -want error, +got error:\n%s", tc.reason, diff) + } + if diff := cmp.Diff(tc.want.r, got, test.EquateErrors()); diff != "" { + t.Errorf("\n%s\nr.Reconcile(...): -want, +got:\n%s", tc.reason, diff) } }) } } - -type mockNamedController struct { - MockStart func(ctx context.Context) error - MockGetCache func() cache.Cache -} - -func (m mockNamedController) Start(ctx context.Context) error { - return m.MockStart(ctx) -} - -func (m mockNamedController) GetCache() cache.Cache { - return m.MockGetCache() -} - -type mockManager struct { - manager.Manager - - GetCacheFn func() cache.Cache - GetClientFn func() client.Client - GetSchemeFn func() *runtime.Scheme - GetRESTMapperFn func() meta.RESTMapper - GetConfigFn func() *rest.Config - GetLoggerFn func() logr.Logger - GetControllerOptionsFn func() ctrlconfig.Controller -} - -func (m *mockManager) GetCache() cache.Cache { - return m.GetCacheFn() -} - -func (m *mockManager) GetClient() client.Client { - return m.GetClientFn() -} - -func (m *mockManager) GetScheme() *runtime.Scheme { - return m.GetSchemeFn() -} - -func (m *mockManager) GetRESTMapper() meta.RESTMapper { - return m.GetRESTMapperFn() -} - -func (m *mockManager) GetConfig() *rest.Config { - return m.GetConfigFn() -} - -func (m *mockManager) GetLogger() logr.Logger { - return m.GetLoggerFn() -} - -func (m *mockManager) GetControllerOptions() ctrlconfig.Controller { - return m.GetControllerOptionsFn() -} - -type mockCache struct { - cache.Cache - - ListFn func(ctx 
context.Context, list client.ObjectList, opts ...client.ListOption) error - IndexFieldFn func(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error - WaitForCacheSyncFn func(ctx context.Context) bool -} - -func (m *mockCache) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { - return m.ListFn(ctx, list, opts...) -} - -func (m *mockCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { - return m.IndexFieldFn(ctx, obj, field, extractValue) -} - -func (m *mockCache) WaitForCacheSync(ctx context.Context) bool { - return m.WaitForCacheSyncFn(ctx) -} diff --git a/internal/controller/apiextensions/definition/watch.go b/internal/controller/apiextensions/definition/watch.go new file mode 100644 index 000000000..d515d359c --- /dev/null +++ b/internal/controller/apiextensions/definition/watch.go @@ -0,0 +1,27 @@ +package definition + +import ( + extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/crossplane/crossplane-runtime/pkg/resource" + + "github.com/crossplane/crossplane/internal/xcrd" +) + +// IsCompositeResourceCRD accepts any CustomResourceDefinition that represents a +// Composite Resource. +func IsCompositeResourceCRD() resource.PredicateFn { + return func(obj runtime.Object) bool { + crd, ok := obj.(*extv1.CustomResourceDefinition) + if !ok { + return false + } + for _, c := range crd.Spec.Names.Categories { + if c == xcrd.CategoryComposite { + return true + } + } + return false + } +} diff --git a/internal/controller/apiextensions/definition/watch_test.go b/internal/controller/apiextensions/definition/watch_test.go new file mode 100644 index 000000000..cffcaf048 --- /dev/null +++ b/internal/controller/apiextensions/definition/watch_test.go @@ -0,0 +1,85 @@ +/* +Copyright 2024 The Crossplane Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package definition + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/runtime" + + v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" +) + +func TestIsCompositeResourceCRD(t *testing.T) { + cases := map[string]struct { + obj runtime.Object + want bool + }{ + "NotCRD": { + want: false, + }, + "XRD": { + obj: &v1.CompositeResourceDefinition{}, + want: false, + }, + "ClaimCRD": { + obj: &extv1.CustomResourceDefinition{ + Spec: extv1.CustomResourceDefinitionSpec{ + Names: extv1.CustomResourceDefinitionNames{ + Categories: []string{ + "claim", + }, + }, + }, + }, + want: false, + }, + "CompositeCRD": { + obj: &extv1.CustomResourceDefinition{ + Spec: extv1.CustomResourceDefinitionSpec{ + Names: extv1.CustomResourceDefinitionNames{ + Categories: []string{ + "composite", + }, + }, + }, + }, + want: true, + }, + "OtherCRD": { + obj: &extv1.CustomResourceDefinition{ + Spec: extv1.CustomResourceDefinitionSpec{ + Names: extv1.CustomResourceDefinitionNames{ + Categories: []string{}, + }, + }, + }, + want: false, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + got := IsCompositeResourceCRD()(tc.obj) + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("\n%s\nIsCompositeResourceCRD(...): -want, +got:\n%s", name, diff) + } + }) + } +} diff --git a/internal/controller/apiextensions/offered/reconciler.go 
b/internal/controller/apiextensions/offered/reconciler.go index 7804e44a5..86dc59fe7 100644 --- a/internal/controller/apiextensions/offered/reconciler.go +++ b/internal/controller/apiextensions/offered/reconciler.go @@ -30,9 +30,9 @@ import ( kunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" - kcontroller "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/crossplane/crossplane-runtime/pkg/connection" @@ -43,12 +43,12 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/meta" "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" "github.com/crossplane/crossplane-runtime/pkg/resource" - "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured" v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" secretsv1alpha1 "github.com/crossplane/crossplane/apis/secrets/v1alpha1" "github.com/crossplane/crossplane/internal/controller/apiextensions/claim" apiextensionscontroller "github.com/crossplane/crossplane/internal/controller/apiextensions/controller" + "github.com/crossplane/crossplane/internal/engine" "github.com/crossplane/crossplane/internal/features" "github.com/crossplane/crossplane/internal/names" "github.com/crossplane/crossplane/internal/xcrd" @@ -67,6 +67,8 @@ const ( errApplyCRD = "cannot apply rendered composite resource claim CustomResourceDefinition" errUpdateStatus = "cannot update status of CompositeResourceDefinition" errStartController = "cannot start composite resource claim controller" + errStopController = "cannot stop composite resource claim controller" + errStartWatches = "cannot start composite resource claim controller watches" errAddFinalizer = "cannot add composite resource claim finalizer" 
errRemoveFinalizer = "cannot remove composite resource claim finalizer" errDeleteCRD = "cannot delete composite resource claim CustomResourceDefinition" @@ -89,10 +91,36 @@ const ( // A ControllerEngine can start and stop Kubernetes controllers on demand. type ControllerEngine interface { + Start(name string, o ...engine.ControllerOption) error + Stop(ctx context.Context, name string) error IsRunning(name string) bool - Start(name string, o kcontroller.Options, w ...controller.Watch) error - Stop(name string) - Err(name string) error + StartWatches(name string, ws ...engine.Watch) error + GetClient() client.Client +} + +// A NopEngine does nothing. +type NopEngine struct{} + +// Start does nothing. +func (e *NopEngine) Start(_ string, _ ...engine.ControllerOption) error { return nil } + +// Stop does nothing. +func (e *NopEngine) Stop(_ context.Context, _ string) error { return nil } + +// IsRunning always returns true. +func (e *NopEngine) IsRunning(_ string) bool { return true } + +// StartWatches does nothing. +func (e *NopEngine) StartWatches(_ string, _ ...engine.Watch) error { return nil } + +// GetClient returns a nil client. +func (e *NopEngine) GetClient() client.Client { + return nil +} + +// GetFieldIndexer returns a nil field indexer. +func (e *NopEngine) GetFieldIndexer() client.FieldIndexer { + return nil } // A CRDRenderer renders a CompositeResourceDefinition's corresponding @@ -117,16 +145,16 @@ func (fn CRDRenderFn) Render(d *v1.CompositeResourceDefinition) (*extv1.CustomRe func Setup(mgr ctrl.Manager, o apiextensionscontroller.Options) error { name := "offered/" + strings.ToLower(v1.CompositeResourceDefinitionGroupKind) - r := NewReconciler(mgr, + r := NewReconciler(NewClientApplicator(mgr.GetClient()), WithLogger(o.Logger.WithValues("controller", name)), WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + WithControllerEngine(o.ControllerEngine), WithOptions(o)) return ctrl.NewControllerManagedBy(mgr). Named(name). 
- For(&v1.CompositeResourceDefinition{}). - Owns(&extv1.CustomResourceDefinition{}). - WithEventFilter(resource.NewPredicates(OffersClaim())). + For(&v1.CompositeResourceDefinition{}, builder.WithPredicates(resource.NewPredicates(OffersClaim()))). + Owns(&extv1.CustomResourceDefinition{}, builder.WithPredicates(resource.NewPredicates(IsClaimCRD()))). WithOptions(o.ForControllerRuntime()). Complete(ratelimiter.NewReconciler(name, errors.WithSilentRequeueOnConflict(r), o.GlobalRateLimiter)) } @@ -168,7 +196,7 @@ func WithFinalizer(f resource.Finalizer) ReconcilerOption { // lifecycles of claim controllers. func WithControllerEngine(c ControllerEngine) ReconcilerOption { return func(r *Reconciler) { - r.claim.ControllerEngine = c + r.engine = c } } @@ -180,32 +208,25 @@ func WithCRDRenderer(c CRDRenderer) ReconcilerOption { } } -// WithClientApplicator specifies how the Reconciler should interact with the -// Kubernetes API. -func WithClientApplicator(ca resource.ClientApplicator) ReconcilerOption { - return func(r *Reconciler) { - r.client = ca - } +// NewClientApplicator returns a ClientApplicator suitable for use by the +// offered controller. +func NewClientApplicator(c client.Client) resource.ClientApplicator { + // TODO(negz): Use server-side apply instead of a ClientApplicator. + return resource.ClientApplicator{Client: c, Applicator: resource.NewAPIUpdatingApplicator(c)} } // NewReconciler returns a Reconciler of CompositeResourceDefinitions. 
-func NewReconciler(mgr manager.Manager, opts ...ReconcilerOption) *Reconciler { - kube := unstructured.NewClient(mgr.GetClient()) - +func NewReconciler(ca resource.ClientApplicator, opts ...ReconcilerOption) *Reconciler { r := &Reconciler{ - mgr: mgr, - - client: resource.ClientApplicator{ - Client: kube, - Applicator: resource.NewAPIUpdatingApplicator(kube), - }, + client: ca, claim: definition{ - CRDRenderer: CRDRenderFn(xcrd.ForCompositeResourceClaim), - ControllerEngine: controller.NewEngine(mgr), - Finalizer: resource.NewAPIFinalizer(kube, finalizer), + CRDRenderer: CRDRenderFn(xcrd.ForCompositeResourceClaim), + Finalizer: resource.NewAPIFinalizer(ca, finalizer), }, + engine: &NopEngine{}, + log: logging.NewNopLogger(), record: event.NewNopRecorder(), @@ -222,17 +243,21 @@ func NewReconciler(mgr manager.Manager, opts ...ReconcilerOption) *Reconciler { type definition struct { CRDRenderer - ControllerEngine resource.Finalizer } // A Reconciler reconciles CompositeResourceDefinitions. type Reconciler struct { - mgr manager.Manager + // This client should only be used by this XRD controller, not the claim + // controllers it manages. Claim controllers should use the engine's client. + // This ensures claim controllers will use a client backed by the same cache + // used to power their watches. client resource.ClientApplicator claim definition + engine ControllerEngine + log logging.Logger record event.Recorder @@ -241,7 +266,7 @@ type Reconciler struct { // Reconcile a CompositeResourceDefinition by defining a new kind of composite // resource claim and starting a controller to reconcile it. -func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocyclo // Reconcilers are complex. Be wary of adding more. +func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocognit // Reconcilers are complex. Be wary of adding more. 
log := r.log.WithValues("request", req) log.Debug("Reconciling") @@ -293,11 +318,14 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco // the (presumably exceedingly rare) latter case we'll orphan // the CRD. if !meta.WasCreated(crd) || !metav1.IsControlledBy(crd, d) { - // It's likely that we've already stopped this - // controller on a previous reconcile, but we try again - // just in case. This is a no-op if the controller was - // already stopped. - r.claim.Stop(claim.ControllerName(d.GetName())) + // It's likely that we've already stopped this controller on a + // previous reconcile, but we try again just in case. This is a + // no-op if the controller was already stopped. + if err := r.engine.Stop(ctx, claim.ControllerName(d.GetName())); err != nil { + err = errors.Wrap(err, errStopController) + r.record.Event(d, event.Warning(reasonRedactXRC, err)) + return reconcile.Result{}, err + } log.Debug("Stopped composite resource claim controller") if err := r.claim.RemoveFinalizer(ctx, d); err != nil { @@ -348,9 +376,13 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco return reconcile.Result{Requeue: true}, nil } - // The controller should be stopped before the deletion of CRD - // so that it doesn't crash. - r.claim.Stop(claim.ControllerName(d.GetName())) + // The controller should be stopped before the deletion of CRD so that + // it doesn't crash. + if err := r.engine.Stop(ctx, claim.ControllerName(d.GetName())); err != nil { + err = errors.Wrap(err, errStopController) + r.record.Event(d, event.Warning(reasonRedactXRC, err)) + return reconcile.Result{}, err + } log.Debug("Stopped composite resource claim controller") if err := r.client.Delete(ctx, crd); resource.IgnoreNotFound(err) != nil { @@ -407,8 +439,8 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco // upgrading fields that were previously managed using client-side apply. 
if r.options.Features.Enabled(features.EnableAlphaClaimSSA) { o = append(o, - claim.WithCompositeSyncer(claim.NewServerSideCompositeSyncer(r.client, names.NewNameGenerator(r.client))), - claim.WithManagedFieldsUpgrader(claim.NewPatchingManagedFieldsUpgrader(r.client)), + claim.WithCompositeSyncer(claim.NewServerSideCompositeSyncer(r.engine.GetClient(), names.NewNameGenerator(r.engine.GetClient()))), + claim.WithManagedFieldsUpgrader(claim.NewPatchingManagedFieldsUpgrader(r.engine.GetClient())), ) } @@ -417,50 +449,64 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco // their default Connection Propagator. if r.options.Features.Enabled(features.EnableAlphaExternalSecretStores) { pc := claim.ConnectionPropagatorChain{ - claim.NewAPIConnectionPropagator(r.client), - connection.NewDetailsManager(r.client, secretsv1alpha1.StoreConfigGroupVersionKind, connection.WithTLSConfig(r.options.ESSOptions.TLSConfig)), + claim.NewAPIConnectionPropagator(r.engine.GetClient()), + connection.NewDetailsManager(r.engine.GetClient(), secretsv1alpha1.StoreConfigGroupVersionKind, connection.WithTLSConfig(r.options.ESSOptions.TLSConfig)), } o = append(o, claim.WithConnectionPropagator(pc), claim.WithConnectionUnpublisher( - claim.NewSecretStoreConnectionUnpublisher(connection.NewDetailsManager(r.client, + claim.NewSecretStoreConnectionUnpublisher(connection.NewDetailsManager(r.engine.GetClient(), secretsv1alpha1.StoreConfigGroupVersionKind, connection.WithTLSConfig(r.options.ESSOptions.TLSConfig))))) } - cr := claim.NewReconciler(r.mgr, - resource.CompositeClaimKind(d.GetClaimGroupVersionKind()), - resource.CompositeKind(d.GetCompositeGroupVersionKind()), o...) 
- - ko := r.options.ForControllerRuntime() - ko.Reconciler = ratelimiter.NewReconciler(claim.ControllerName(d.GetName()), errors.WithSilentRequeueOnConflict(cr), r.options.GlobalRateLimiter) - - if err := r.claim.Err(claim.ControllerName(d.GetName())); err != nil { - log.Debug("Composite resource controller encountered an error", "error", err) - } - observed := d.Status.Controllers.CompositeResourceClaimTypeRef desired := v1.TypeReferenceTo(d.GetClaimGroupVersionKind()) if observed.APIVersion != "" && observed != desired { - r.claim.Stop(claim.ControllerName(d.GetName())) + if err := r.engine.Stop(ctx, claim.ControllerName(d.GetName())); err != nil { + err = errors.Wrap(err, errStopController) + r.record.Event(d, event.Warning(reasonOfferXRC, err)) + return reconcile.Result{}, err + } log.Debug("Referenceable version changed; stopped composite resource claim controller", "observed-version", observed.APIVersion, "desired-version", desired.APIVersion) } + if r.engine.IsRunning(claim.ControllerName(d.GetName())) { + log.Debug("Composite resource claim controller is running") + d.Status.SetConditions(v1.WatchingClaim()) + return reconcile.Result{Requeue: false}, errors.Wrap(r.client.Status().Update(ctx, d), errUpdateStatus) + } + + cr := claim.NewReconciler(r.engine.GetClient(), + resource.CompositeClaimKind(d.GetClaimGroupVersionKind()), + resource.CompositeKind(d.GetCompositeGroupVersionKind()), o...) + + ko := r.options.ForControllerRuntime() + ko.Reconciler = ratelimiter.NewReconciler(claim.ControllerName(d.GetName()), errors.WithSilentRequeueOnConflict(cr), r.options.GlobalRateLimiter) + + if err := r.engine.Start(claim.ControllerName(d.GetName()), engine.WithRuntimeOptions(ko)); err != nil { + err = errors.Wrap(err, errStartController) + r.record.Event(d, event.Warning(reasonOfferXRC, err)) + return reconcile.Result{}, err + } + log.Debug("Started composite resource claim controller") + + // These must be *unstructured.Unstructured, not e.g. *claim.Unstructured. 
+ // controller-runtime doesn't support watching types that satisfy the + // runtime.Unstructured interface - only *unstructured.Unstructured. cm := &kunstructured.Unstructured{} cm.SetGroupVersionKind(d.GetClaimGroupVersionKind()) + xr := &kunstructured.Unstructured{} + xr.SetGroupVersionKind(d.GetCompositeGroupVersionKind()) - cp := &kunstructured.Unstructured{} - cp.SetGroupVersionKind(d.GetCompositeGroupVersionKind()) - - if err := r.claim.Start(claim.ControllerName(d.GetName()), ko, - controller.For(cm, &handler.EnqueueRequestForObject{}), - controller.For(cp, &EnqueueRequestForClaim{}), + if err := r.engine.StartWatches(claim.ControllerName(d.GetName()), + engine.WatchFor(cm, engine.WatchTypeClaim, &handler.EnqueueRequestForObject{}), + engine.WatchFor(xr, engine.WatchTypeCompositeResource, &EnqueueRequestForClaim{}), ); err != nil { - err = errors.Wrap(err, errStartController) + err = errors.Wrap(err, errStartWatches) r.record.Event(d, event.Warning(reasonOfferXRC, err)) return reconcile.Result{}, err } - log.Debug("(Re)started composite resource claim controller") d.Status.Controllers.CompositeResourceClaimTypeRef = v1.TypeReferenceTo(d.GetClaimGroupVersionKind()) d.Status.SetConditions(v1.WatchingClaim()) diff --git a/internal/controller/apiextensions/offered/reconciler_test.go b/internal/controller/apiextensions/offered/reconciler_test.go index 82ecb0fdd..366117324 100644 --- a/internal/controller/apiextensions/offered/reconciler_test.go +++ b/internal/controller/apiextensions/offered/reconciler_test.go @@ -29,38 +29,49 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" - kcontroller "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/log/zap" - "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "github.com/crossplane/crossplane-runtime/pkg/controller" "github.com/crossplane/crossplane-runtime/pkg/errors" 
"github.com/crossplane/crossplane-runtime/pkg/logging" "github.com/crossplane/crossplane-runtime/pkg/resource" - "github.com/crossplane/crossplane-runtime/pkg/resource/fake" "github.com/crossplane/crossplane-runtime/pkg/test" v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" + "github.com/crossplane/crossplane/internal/engine" ) type MockEngine struct { - ControllerEngine - MockStart func(name string, o kcontroller.Options, w ...controller.Watch) error - MockStop func(name string) - MockErr func(name string) error + MockStart func(name string, o ...engine.ControllerOption) error + MockStop func(ctx context.Context, name string) error + MockIsRunning func(name string) bool + MockStartWatches func(name string, ws ...engine.Watch) error + MockGetClient func() client.Client } -func (m *MockEngine) Start(name string, o kcontroller.Options, w ...controller.Watch) error { - return m.MockStart(name, o, w...) +var ( + _ ControllerEngine = &MockEngine{} + _ ControllerEngine = &NopEngine{} +) + +func (m *MockEngine) Start(name string, o ...engine.ControllerOption) error { + return m.MockStart(name, o...) } -func (m *MockEngine) Stop(name string) { - m.MockStop(name) +func (m *MockEngine) Stop(ctx context.Context, name string) error { + return m.MockStop(ctx, name) } -func (m *MockEngine) Err(name string) error { - return m.MockErr(name) +func (m *MockEngine) IsRunning(name string) bool { + return m.MockIsRunning(name) +} + +func (m *MockEngine) StartWatches(name string, ws ...engine.Watch) error { + return m.MockStartWatches(name, ws...) 
+} + +func (m *MockEngine) GetClient() client.Client { + return m.MockGetClient() } func TestReconcile(t *testing.T) { @@ -71,7 +82,7 @@ func TestReconcile(t *testing.T) { ctrlr := true type args struct { - mgr manager.Manager + ca resource.ClientApplicator opts []ReconcilerOption } type want struct { @@ -87,13 +98,10 @@ func TestReconcile(t *testing.T) { "CompositeResourceDefinitionNotFound": { reason: "We should not return an error if the CompositeResourceDefinition was not found.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{}, "")), - }, - }), + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{}, "")), + }, }, }, want: want{ @@ -103,13 +111,10 @@ func TestReconcile(t *testing.T) { "GetCompositeResourceDefinitionError": { reason: "We should return any other error encountered while getting a CompositeResourceDefinition.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(errBoom), - }, - }), + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(errBoom), + }, }, }, want: want{ @@ -119,13 +124,12 @@ func TestReconcile(t *testing.T) { "RenderCompositeResourceDefinitionError": { reason: "We should return any error we encounter while rendering a CRD.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return nil, errBoom 
})), @@ -138,18 +142,17 @@ func TestReconcile(t *testing.T) { "SetTerminatingConditionError": { reason: "We should return any error we encounter while setting the terminating status condition.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + d := o.(*v1.CompositeResourceDefinition) + d.SetDeletionTimestamp(&now) + return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(errBoom), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - d := o.(*v1.CompositeResourceDefinition) - d.SetDeletionTimestamp(&now) - return nil - }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(errBoom), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -162,24 +165,23 @@ func TestReconcile(t *testing.T) { "GetCustomResourceDefinitionError": { reason: "We should return any error we encounter while getting a CRD.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + return errBoom + } + return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - switch v := o.(type) { - case *v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{} - d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - return 
errBoom - } - return nil - }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -189,27 +191,60 @@ func TestReconcile(t *testing.T) { err: errors.Wrap(errBoom, errGetCRD), }, }, - "RemoveFinalizerError": { - reason: "We should return any error we encounter while removing a finalizer.", + "CustomResourceDefinitionNotFoundStopControllerError": { + reason: "We should return any error we encounter while stopping our controller (just in case) when the CRD doesn't exist.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + if v, ok := o.(*v1.CompositeResourceDefinition); ok { + d := v1.CompositeResourceDefinition{} + d.SetDeletionTimestamp(&now) + *v = d + } + return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - if v, ok := o.(*v1.CompositeResourceDefinition); ok { - d := v1.CompositeResourceDefinition{} - d.SetDeletionTimestamp(&now) - *v = d - } - return nil - }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { + return &extv1.CustomResourceDefinition{}, nil + })), + WithControllerEngine(&MockEngine{ + MockStop: func(_ context.Context, _ string) error { + return errBoom }, }), + }, + }, + want: want{ + err: errors.Wrap(errBoom, errStopController), + }, + }, + "RemoveFinalizerError": { + reason: "We should return any error we encounter while removing a finalizer.", + args: args{ + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: 
test.NewMockGetFn(nil, func(o client.Object) error { + if v, ok := o.(*v1.CompositeResourceDefinition); ok { + d := v1.CompositeResourceDefinition{} + d.SetDeletionTimestamp(&now) + *v = d + } + return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), + WithControllerEngine(&MockEngine{ + MockStop: func(_ context.Context, _ string) error { return nil }, + }), WithFinalizer(resource.FinalizerFns{RemoveFinalizerFn: func(_ context.Context, _ resource.Object) error { return errBoom }}), @@ -222,24 +257,26 @@ func TestReconcile(t *testing.T) { "SuccessfulDelete": { reason: "We should not requeue when deleted if we successfully cleaned up our CRD and removed our finalizer.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + if v, ok := o.(*v1.CompositeResourceDefinition); ok { + d := v1.CompositeResourceDefinition{} + d.SetDeletionTimestamp(&now) + *v = d + } + return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - if v, ok := o.(*v1.CompositeResourceDefinition); ok { - d := v1.CompositeResourceDefinition{} - d.SetDeletionTimestamp(&now) - *v = d - } - return nil - }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), + WithControllerEngine(&MockEngine{ + MockStop: func(_ context.Context, _ string) error { return nil }, + }), 
WithFinalizer(resource.FinalizerFns{RemoveFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }}), @@ -252,29 +289,28 @@ func TestReconcile(t *testing.T) { "ListCustomResourcesError": { reason: "We should return any error we encounter while listing all defined resources.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetUID(owner) + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + crd := extv1.CustomResourceDefinition{} + crd.SetCreationTimestamp(now) + crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) + *v = crd + } + return nil + }), + MockList: test.NewMockListFn(errBoom), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - switch v := o.(type) { - case *v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{} - d.SetUID(owner) - d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - crd := extv1.CustomResourceDefinition{} - crd.SetCreationTimestamp(now) - crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) - *v = crd - } - return nil - }), - MockList: test.NewMockListFn(errBoom), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -287,36 +323,35 @@ func TestReconcile(t *testing.T) { "DeleteCustomResourcesError": { reason: "We should return any error we encounter while deleting defined resources.", args: args{ - mgr: 
&fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetUID(owner) + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + crd := extv1.CustomResourceDefinition{} + crd.SetCreationTimestamp(now) + crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) + *v = crd + } + return nil + }), + MockList: test.NewMockListFn(nil, func(o client.ObjectList) error { + v := o.(*unstructured.UnstructuredList) + *v = unstructured.UnstructuredList{ + Items: []unstructured.Unstructured{{}, {}}, + } + return nil + }), + MockDelete: test.NewMockDeleteFn(errBoom), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - switch v := o.(type) { - case *v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{} - d.SetUID(owner) - d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - crd := extv1.CustomResourceDefinition{} - crd.SetCreationTimestamp(now) - crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) - *v = crd - } - return nil - }), - MockList: test.NewMockListFn(nil, func(o client.ObjectList) error { - v := o.(*unstructured.UnstructuredList) - *v = unstructured.UnstructuredList{ - Items: []unstructured.Unstructured{{}, {}}, - } - return nil - }), - MockDelete: test.NewMockDeleteFn(errBoom), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -329,36 +364,35 @@ func TestReconcile(t *testing.T) { 
"SuccessfulDeleteCustomResources": { reason: "We should requeue to ensure our defined resources are gone before we remove our CRD.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetUID(owner) + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + crd := extv1.CustomResourceDefinition{} + crd.SetCreationTimestamp(now) + crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) + *v = crd + } + return nil + }), + MockList: test.NewMockListFn(nil, func(o client.ObjectList) error { + v := o.(*unstructured.UnstructuredList) + *v = unstructured.UnstructuredList{ + Items: []unstructured.Unstructured{{}, {}}, + } + return nil + }), + MockDelete: test.NewMockDeleteFn(nil), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - switch v := o.(type) { - case *v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{} - d.SetUID(owner) - d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - crd := extv1.CustomResourceDefinition{} - crd.SetCreationTimestamp(now) - crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) - *v = crd - } - return nil - }), - MockList: test.NewMockListFn(nil, func(o client.ObjectList) error { - v := o.(*unstructured.UnstructuredList) - *v = unstructured.UnstructuredList{ - Items: []unstructured.Unstructured{{}, {}}, - } - return nil - }), - MockDelete: test.NewMockDeleteFn(nil), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) 
(*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -368,38 +402,80 @@ func TestReconcile(t *testing.T) { r: reconcile.Result{Requeue: true}, }, }, - "DeleteCustomResourceDefinitionError": { - reason: "We should return any error we encounter while deleting the CRD we created.", + "StopControllerError": { + reason: "We should return any error we encounter while stopping our controller.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetUID(owner) + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + crd := extv1.CustomResourceDefinition{} + crd.SetCreationTimestamp(now) + crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) + *v = crd + } + return nil + }), + MockDeleteAllOf: test.NewMockDeleteAllOfFn(nil), + MockList: test.NewMockListFn(nil), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - switch v := o.(type) { - case *v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{ - Spec: v1.CompositeResourceDefinitionSpec{}, - } - d.SetUID(owner) - d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - crd := extv1.CustomResourceDefinition{} - crd.SetCreationTimestamp(now) - crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) - *v = crd - } - return nil - }), - MockList: test.NewMockListFn(nil), - MockDelete: test.NewMockDeleteFn(errBoom), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) 
(*extv1.CustomResourceDefinition, error) { + return &extv1.CustomResourceDefinition{}, nil + })), + WithControllerEngine(&MockEngine{ + MockStop: func(_ context.Context, _ string) error { + return errBoom }, }), + }, + }, + want: want{ + err: errors.Wrap(errBoom, errStopController), + }, + }, + "DeleteCustomResourceDefinitionError": { + reason: "We should return any error we encounter while deleting the CRD we created.", + args: args{ + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{ + Spec: v1.CompositeResourceDefinitionSpec{}, + } + d.SetUID(owner) + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + crd := extv1.CustomResourceDefinition{} + crd.SetCreationTimestamp(now) + crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) + *v = crd + } + return nil + }), + MockList: test.NewMockListFn(nil), + MockDelete: test.NewMockDeleteFn(errBoom), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil), + }, + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), + WithControllerEngine(&MockEngine{ + MockStop: func(_ context.Context, _ string) error { return nil }, + }), }, }, want: want{ @@ -409,44 +485,46 @@ func TestReconcile(t *testing.T) { "SuccessfulCleanup": { reason: "We should requeue to remove our finalizer once we've cleaned up our defined resources and CRD.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - switch v := o.(type) { - case *v1.CompositeResourceDefinition: - d := v1.CompositeResourceDefinition{} - d.SetUID(owner) - 
d.SetDeletionTimestamp(&now) - *v = d - case *extv1.CustomResourceDefinition: - crd := extv1.CustomResourceDefinition{} - crd.SetCreationTimestamp(now) - crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) - *v = crd - } - return nil - }), - MockList: test.NewMockListFn(nil), - MockDelete: test.NewMockDeleteFn(nil), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(got client.Object) error { - want := &v1.CompositeResourceDefinition{} - want.SetUID(owner) - want.SetDeletionTimestamp(&now) - want.Status.SetConditions(v1.TerminatingClaim()) + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(o client.Object) error { + switch v := o.(type) { + case *v1.CompositeResourceDefinition: + d := v1.CompositeResourceDefinition{} + d.SetUID(owner) + d.SetDeletionTimestamp(&now) + *v = d + case *extv1.CustomResourceDefinition: + crd := extv1.CustomResourceDefinition{} + crd.SetCreationTimestamp(now) + crd.SetOwnerReferences([]metav1.OwnerReference{{UID: owner, Controller: &ctrlr}}) + *v = crd + } + return nil + }), + MockList: test.NewMockListFn(nil), + MockDelete: test.NewMockDeleteFn(nil), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(got client.Object) error { + want := &v1.CompositeResourceDefinition{} + want.SetUID(owner) + want.SetDeletionTimestamp(&now) + want.Status.SetConditions(v1.TerminatingClaim()) - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("MockStatusUpdate: -want, +got:\n%s\n", diff) - } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("MockStatusUpdate: -want, +got:\n%s\n", diff) + } - return nil - }), - }, - }), + return nil + }), + }, + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), + WithControllerEngine(&MockEngine{ + MockStop: func(_ context.Context, _ string) error { return nil }, + }), 
}, }, want: want{ @@ -456,13 +534,12 @@ func TestReconcile(t *testing.T) { "AddFinalizerError": { reason: "We should return any error we encounter while adding a finalizer.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + }, + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - }, - }), WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -478,16 +555,15 @@ func TestReconcile(t *testing.T) { "ApplyCRDError": { reason: "We should return any error we encounter while applying our CRD.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - }, - Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { - return errBoom - }), + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return errBoom }), + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{}, nil })), @@ -503,41 +579,105 @@ func TestReconcile(t *testing.T) { "CustomResourceDefinitionIsNotEstablished": { reason: "We should requeue if we're waiting for a newly created CRD to become established.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil + }), + }, opts: 
[]ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - }, - Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { + return &extv1.CustomResourceDefinition{}, nil + })), + WithFinalizer(resource.FinalizerFns{AddFinalizerFn: func(_ context.Context, _ resource.Object) error { + return nil + }}), + }, + }, + want: want{ + r: reconcile.Result{Requeue: true}, + }, + }, + "VersionChangedStopControllerError": { + reason: "We should return any error we encounter while stopping our controller because the XRD's referencable version changed.", + args: args{ + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + xrd := &v1.CompositeResourceDefinition{ + Spec: v1.CompositeResourceDefinitionSpec{ + Group: "example.org", + ClaimNames: &extv1.CustomResourceDefinitionNames{ + Kind: "Claim", + }, + Versions: []v1.CompositeResourceDefinitionVersion{ + { + Name: "v2", + Referenceable: true, + }, + { + Name: "v1", + }, + }, + }, + Status: v1.CompositeResourceDefinitionStatus{ + Controllers: v1.CompositeResourceDefinitionControllerStatus{ + CompositeResourceClaimTypeRef: v1.TypeReference{ + APIVersion: "example.org/v1", + Kind: "Claim", + }, + }, + }, + } + + *obj.(*v1.CompositeResourceDefinition) = *xrd return nil }), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil }), + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { - return &extv1.CustomResourceDefinition{}, nil + return &extv1.CustomResourceDefinition{ + Status: extv1.CustomResourceDefinitionStatus{ + Conditions: 
[]extv1.CustomResourceDefinitionCondition{ + {Type: extv1.Established, Status: extv1.ConditionTrue}, + }, + }, + }, nil })), WithFinalizer(resource.FinalizerFns{AddFinalizerFn: func(_ context.Context, _ resource.Object) error { return nil }}), + WithControllerEngine(&MockEngine{ + MockIsRunning: func(_ string) bool { return false }, + MockStop: func(_ context.Context, _ string) error { + return errBoom + }, + }), }, }, want: want{ - r: reconcile.Result{Requeue: true}, + r: reconcile.Result{}, + err: errors.Wrap(errBoom, errStopController), }, }, "StartControllerError": { reason: "We should return any error we encounter while starting our controller.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - }, - Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { - return nil - }), + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil }), + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{ Status: extv1.CustomResourceDefinitionStatus{ @@ -551,8 +691,9 @@ func TestReconcile(t *testing.T) { return nil }}), WithControllerEngine(&MockEngine{ - MockErr: func(_ string) error { return nil }, - MockStart: func(_ string, _ kcontroller.Options, _ ...controller.Watch) error { return errBoom }, + MockIsRunning: func(_ string) bool { return false }, + MockStart: func(_ string, _ ...engine.ControllerOption) error { return errBoom }, + MockGetClient: func() client.Client { return test.NewMockClient() }, }), }, }, @@ -560,28 +701,68 @@ func TestReconcile(t *testing.T) { err: errors.Wrap(errBoom, errStartController), 
}, }, + "StartWatchesError": { + reason: "We should return any error we encounter while starting watches.", + args: args{ + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil + }), + }, + opts: []ReconcilerOption{ + WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { + return &extv1.CustomResourceDefinition{ + Status: extv1.CustomResourceDefinitionStatus{ + Conditions: []extv1.CustomResourceDefinitionCondition{ + {Type: extv1.Established, Status: extv1.ConditionTrue}, + }, + }, + }, nil + })), + WithFinalizer(resource.FinalizerFns{AddFinalizerFn: func(_ context.Context, _ resource.Object) error { + return nil + }}), + WithControllerEngine(&MockEngine{ + MockIsRunning: func(_ string) bool { return false }, + MockStart: func(_ string, _ ...engine.ControllerOption) error { + return nil + }, + MockStartWatches: func(_ string, _ ...engine.Watch) error { + return errBoom + }, + MockGetClient: func() client.Client { return test.NewMockClient() }, + }), + }, + }, + want: want{ + r: reconcile.Result{}, + err: errors.Wrap(errBoom, errStartWatches), + }, + }, "SuccessfulStart": { reason: "We should not requeue if we successfully ensured our CRD exists and controller is started.", args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { - want := &v1.CompositeResourceDefinition{} - want.Status.SetConditions(v1.WatchingClaim()) + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { + want := &v1.CompositeResourceDefinition{} + 
want.Status.SetConditions(v1.WatchingClaim()) - if diff := cmp.Diff(want, o); diff != "" { - t.Errorf("-want, +got:\n%s", diff) - } - return nil - }), - }, - Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + if diff := cmp.Diff(want, o); diff != "" { + t.Errorf("-want, +got:\n%s", diff) + } return nil }), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil }), + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{ Status: extv1.CustomResourceDefinitionStatus{ @@ -595,8 +776,11 @@ func TestReconcile(t *testing.T) { return nil }}), WithControllerEngine(&MockEngine{ - MockErr: func(name string) error { return errBoom }, // This error should only be logged. - MockStart: func(_ string, _ kcontroller.Options, _ ...controller.Watch) error { return nil }}, + MockIsRunning: func(_ string) bool { return false }, + MockStart: func(_ string, _ ...engine.ControllerOption) error { return nil }, + MockStartWatches: func(_ string, _ ...engine.Watch) error { return nil }, + MockGetClient: func() client.Client { return test.NewMockClient() }, + }, ), }, }, @@ -607,40 +791,85 @@ func TestReconcile(t *testing.T) { "SuccessfulUpdateControllerVersion": { reason: "We should not requeue if we successfully ensured our CRD exists, the old controller stopped, and the new one started.", args: args{ - mgr: &fake.Manager{}, + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + d := obj.(*v1.CompositeResourceDefinition) + d.Spec.ClaimNames = &extv1.CustomResourceDefinitionNames{} + d.Spec.Versions = []v1.CompositeResourceDefinitionVersion{ + {Name: "old", Referenceable: false}, + {Name: "new", Referenceable: true}, + } + 
d.Status.Controllers.CompositeResourceClaimTypeRef = v1.TypeReference{APIVersion: "old"} + return nil + }), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { + want := &v1.CompositeResourceDefinition{} + want.Spec.ClaimNames = &extv1.CustomResourceDefinitionNames{} + want.Spec.Versions = []v1.CompositeResourceDefinitionVersion{ + {Name: "old", Referenceable: false}, + {Name: "new", Referenceable: true}, + } + want.Status.Controllers.CompositeResourceClaimTypeRef = v1.TypeReference{APIVersion: "new"} + want.Status.SetConditions(v1.WatchingClaim()) + + if diff := cmp.Diff(want, o); diff != "" { + t.Errorf("-want, +got:\n%s", diff) + } + return nil + }), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil + }), + }, opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { - d := obj.(*v1.CompositeResourceDefinition) - d.Spec.ClaimNames = &extv1.CustomResourceDefinitionNames{} - d.Spec.Versions = []v1.CompositeResourceDefinitionVersion{ - {Name: "old", Referenceable: false}, - {Name: "new", Referenceable: true}, - } - d.Status.Controllers.CompositeResourceClaimTypeRef = v1.TypeReference{APIVersion: "old"} - return nil - }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { - want := &v1.CompositeResourceDefinition{} - want.Spec.ClaimNames = &extv1.CustomResourceDefinitionNames{} - want.Spec.Versions = []v1.CompositeResourceDefinitionVersion{ - {Name: "old", Referenceable: false}, - {Name: "new", Referenceable: true}, - } - want.Status.Controllers.CompositeResourceClaimTypeRef = v1.TypeReference{APIVersion: "new"} - want.Status.SetConditions(v1.WatchingClaim()) + WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { + return &extv1.CustomResourceDefinition{ + 
Status: extv1.CustomResourceDefinitionStatus{ + Conditions: []extv1.CustomResourceDefinitionCondition{ + {Type: extv1.Established, Status: extv1.ConditionTrue}, + }, + }, + }, nil + })), + WithFinalizer(resource.FinalizerFns{AddFinalizerFn: func(_ context.Context, _ resource.Object) error { + return nil + }}), + WithControllerEngine(&MockEngine{ + MockStart: func(_ string, _ ...engine.ControllerOption) error { return nil }, + MockStop: func(_ context.Context, _ string) error { return nil }, + MockIsRunning: func(_ string) bool { return false }, + MockStartWatches: func(_ string, _ ...engine.Watch) error { return nil }, + MockGetClient: func() client.Client { return test.NewMockClient() }, + }), + }, + }, + want: want{ + r: reconcile.Result{Requeue: false}, + }, + }, + "NotRestartingWithoutVersionChange": { + reason: "We should return without requeueing if we successfully ensured our CRD exists and controller is started.", + args: args{ + ca: resource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil), + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { + want := &v1.CompositeResourceDefinition{} + want.Status.SetConditions(v1.WatchingClaim()) - if diff := cmp.Diff(want, o); diff != "" { - t.Errorf("-want, +got:\n%s", diff) - } - return nil - }), - }, - Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + if diff := cmp.Diff(want, o); diff != "" { + t.Errorf("-want, +got:\n%s", diff) + } return nil }), + }, + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { + return nil }), + }, + opts: []ReconcilerOption{ WithCRDRenderer(CRDRenderFn(func(_ *v1.CompositeResourceDefinition) (*extv1.CustomResourceDefinition, error) { return &extv1.CustomResourceDefinition{ Status: extv1.CustomResourceDefinitionStatus{ @@ -654,9 +883,11 @@ func TestReconcile(t *testing.T) { return nil }}), 
WithControllerEngine(&MockEngine{ - MockErr: func(name string) error { return nil }, - MockStart: func(_ string, _ kcontroller.Options, _ ...controller.Watch) error { return nil }, - MockStop: func(_ string) {}, + MockIsRunning: func(_ string) bool { return true }, + MockStart: func(_ string, _ ...engine.ControllerOption) error { + t.Errorf("MockStart should not be called") + return nil + }, }), }, }, @@ -668,7 +899,7 @@ func TestReconcile(t *testing.T) { for name, tc := range cases { t.Run(name, func(t *testing.T) { - r := NewReconciler(tc.args.mgr, append(tc.args.opts, WithLogger(testLog))...) + r := NewReconciler(tc.args.ca, append(tc.args.opts, WithLogger(testLog))...) got, err := r.Reconcile(context.Background(), reconcile.Request{}) if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { diff --git a/internal/controller/apiextensions/offered/watch.go b/internal/controller/apiextensions/offered/watch.go index 462239037..a2c83d914 100644 --- a/internal/controller/apiextensions/offered/watch.go +++ b/internal/controller/apiextensions/offered/watch.go @@ -19,6 +19,7 @@ package offered import ( "context" + extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -30,10 +31,10 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composite" v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" + "github.com/crossplane/crossplane/internal/xcrd" ) -// OffersClaim accepts objects that are a CompositeResourceDefinition and offer -// a composite resource claim. +// OffersClaim accepts any CompositeResourceDefinition that offers a claim. 
func OffersClaim() resource.PredicateFn { return func(obj runtime.Object) bool { d, ok := obj.(*v1.CompositeResourceDefinition) @@ -44,6 +45,22 @@ func OffersClaim() resource.PredicateFn { } } +// IsClaimCRD accepts any CustomResourceDefinition that represents a Claim. +func IsClaimCRD() resource.PredicateFn { + return func(obj runtime.Object) bool { + d, ok := obj.(*extv1.CustomResourceDefinition) + if !ok { + return false + } + for _, c := range d.Spec.Names.Categories { + if c == xcrd.CategoryClaim { + return true + } + } + return false + } +} + type adder interface { Add(item any) } diff --git a/internal/controller/apiextensions/offered/watch_test.go b/internal/controller/apiextensions/offered/watch_test.go index c445b002d..4c9f128ad 100644 --- a/internal/controller/apiextensions/offered/watch_test.go +++ b/internal/controller/apiextensions/offered/watch_test.go @@ -32,9 +32,7 @@ import ( v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" ) -var ( - _ handler.EventHandler = &EnqueueRequestForClaim{} -) +var _ handler.EventHandler = &EnqueueRequestForClaim{} func TestOffersClaim(t *testing.T) { cases := map[string]struct { @@ -44,6 +42,10 @@ func TestOffersClaim(t *testing.T) { "NotAnXRD": { want: false, }, + "CRD": { + obj: &extv1.CustomResourceDefinition{}, + want: false, + }, "DoesNotOfferClaim": { obj: &v1.CompositeResourceDefinition{}, want: false, @@ -63,7 +65,59 @@ func TestOffersClaim(t *testing.T) { t.Run(name, func(t *testing.T) { got := OffersClaim()(tc.obj) if diff := cmp.Diff(tc.want, got); diff != "" { - t.Errorf("OffersClaim(...): -want, +got:\n%s", diff) + t.Errorf("\n%s\nOffersClaim(...): -want, +got:\n%s", name, diff) + } + }) + } +} + +func TestIsClaimCRD(t *testing.T) { + cases := map[string]struct { + obj runtime.Object + want bool + }{ + "NotCRD": { + want: false, + }, + "XRD": { + obj: &v1.CompositeResourceDefinition{}, + want: false, + }, + "ClaimCRD": { + obj: &extv1.CustomResourceDefinition{ + Spec: 
extv1.CustomResourceDefinitionSpec{ + Names: extv1.CustomResourceDefinitionNames{ + Categories: []string{ + "claim", + }, + }, + }, + }, + want: true, + }, + "CompositeCRD": { + obj: &extv1.CustomResourceDefinition{ + Spec: extv1.CustomResourceDefinitionSpec{ + Names: extv1.CustomResourceDefinitionNames{ + Categories: []string{ + "composite", + }, + }, + }, + }, + want: false, + }, + "OtherCRD": { + obj: &extv1.CustomResourceDefinition{}, + want: false, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + got := IsClaimCRD()(tc.obj) + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("\n%s\nIsClaimCRD(...): -want, +got:\n%s", name, diff) } }) } diff --git a/internal/controller/apiextensions/usage/reconciler.go b/internal/controller/apiextensions/usage/reconciler.go index e66b2df54..58f50a2a5 100644 --- a/internal/controller/apiextensions/usage/reconciler.go +++ b/internal/controller/apiextensions/usage/reconciler.go @@ -66,7 +66,7 @@ const ( errGetUsed = "cannot get used" errAddOwnerToUsage = "cannot update usage resource with owner ref" errAddDetailsAnnotation = "cannot update usage resource with details annotation" - errAddInUseLabel = "cannot add in use use label to the used resource" + errAddInUseLabel = "cannot add in use label to the used resource" errRemoveInUseLabel = "cannot remove in use label from the used resource" errAddFinalizer = "cannot add finalizer" errRemoveFinalizer = "cannot remove finalizer" @@ -86,6 +86,7 @@ const ( reasonRemoveInUseLabel event.Reason = "RemoveInUseLabel" reasonAddFinalizer event.Reason = "AddFinalizer" reasonRemoveFinalizer event.Reason = "RemoveFinalizer" + reasonReplayDeletion event.Reason = "ReplayDeletion" reasonUsageConfigured event.Reason = "UsageConfigured" reasonWaitUsing event.Reason = "WaitingUsingDeleted" @@ -170,6 +171,12 @@ type usageResource struct { // NewReconciler returns a Reconciler of Usages. 
func NewReconciler(mgr manager.Manager, opts ...ReconcilerOption) *Reconciler { + // TODO(negz): Stop using this wrapper? It's only necessary if the client is + // backed by a cache, and at the time of writing the manager's client isn't. + // It's configured not to automatically cache unstructured objects. The + // wrapper is needed when caching because controller-runtime doesn't support + // caching types that satisfy runtime.Unstructured - it only supports the + // concrete *unstructured.Unstructured type. kube := unstructured.NewClient(mgr.GetClient()) r := &Reconciler{ @@ -207,7 +214,7 @@ type Reconciler struct { // Reconcile a Usage resource by resolving its selectors, defining ownership // relationship, adding a finalizer and handling proper deletion. -func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocyclo // Reconcilers are typically complex. +func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocognit // Reconcilers are typically complex. log := r.log.WithValues("request", req) ctx, cancel := context.WithTimeout(ctx, reconcileTimeout) defer cancel() @@ -316,6 +323,24 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco } } + if u.Spec.ReplayDeletion != nil && *u.Spec.ReplayDeletion && used.GetAnnotations() != nil { + if policy, ok := used.GetAnnotations()[usage.AnnotationKeyDeletionAttempt]; ok { + // We have already recorded a deletion attempt and want to replay deletion, let's delete the used resource. + + //nolint:contextcheck // We cannot use the context from the reconcile function since it will be cancelled after the reconciliation. + go func() { + // We do the deletion async and after some delay to make sure the usage is deleted before the + // deletion attempt. We remove the finalizer on this Usage right below, so, we know it will disappear + // very soon. 
+ time.Sleep(2 * time.Second) + log.Info("Replaying deletion of the used resource", "apiVersion", used.GetAPIVersion(), "kind", used.GetKind(), "name", used.GetName(), "policy", policy) + if err = r.client.Delete(context.Background(), used, client.PropagationPolicy(policy)); err != nil { + log.Info("Error when replaying deletion of the used resource", "apiVersion", used.GetAPIVersion(), "kind", used.GetKind(), "name", used.GetName(), "err", err) + } + }() + } + } + // Remove the finalizer from the usage if err = r.usage.RemoveFinalizer(ctx, u); err != nil { log.Debug(errRemoveFinalizer, "error", err) @@ -444,7 +469,7 @@ func detailsAnnotation(u *v1alpha1.Usage) string { // composite controller since otherwise we lose the owner reference this // controller puts on the Usage. func RespectOwnerRefs() xpresource.ApplyOption { - return func(ctx context.Context, current, desired runtime.Object) error { + return func(_ context.Context, current, desired runtime.Object) error { cu, ok := current.(*composed.Unstructured) if !ok || cu.GetObjectKind().GroupVersionKind() != v1alpha1.UsageGroupVersionKind { return nil @@ -452,6 +477,7 @@ func RespectOwnerRefs() xpresource.ApplyOption { // This is a Usage resource, so we need to respect existing owner // references in case it has any. if len(cu.GetOwnerReferences()) > 0 { + //nolint:forcetypeassert // This will always be a metav1.Object. 
desired.(metav1.Object).SetOwnerReferences(cu.GetOwnerReferences()) } return nil diff --git a/internal/controller/apiextensions/usage/reconciler_test.go b/internal/controller/apiextensions/usage/reconciler_test.go index 3b10e3d1f..3225b79b3 100644 --- a/internal/controller/apiextensions/usage/reconciler_test.go +++ b/internal/controller/apiextensions/usage/reconciler_test.go @@ -26,6 +26,7 @@ import ( kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -38,11 +39,12 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/test" "github.com/crossplane/crossplane/apis/apiextensions/v1alpha1" + "github.com/crossplane/crossplane/internal/usage" "github.com/crossplane/crossplane/internal/xcrd" ) type fakeSelectorResolver struct { - resourceSelectorFn func(ctx context.Context, u *v1alpha1.Usage) error + resourceSelectorFn func(_ context.Context, _ *v1alpha1.Usage) error } func (f fakeSelectorResolver) resolveSelectors(ctx context.Context, u *v1alpha1.Usage) error { @@ -118,7 +120,7 @@ func TestReconcile(t *testing.T) { }, }), WithSelectorResolver(fakeSelectorResolver{ - resourceSelectorFn: func(ctx context.Context, u *v1alpha1.Usage) error { + resourceSelectorFn: func(_ context.Context, _ *v1alpha1.Usage) error { return errBoom }, }), @@ -143,7 +145,7 @@ func TestReconcile(t *testing.T) { }, }), WithSelectorResolver(fakeSelectorResolver{ - resourceSelectorFn: func(ctx context.Context, u *v1alpha1.Usage) error { + resourceSelectorFn: func(_ context.Context, _ *v1alpha1.Usage) error { return nil }, }), @@ -172,7 +174,7 @@ func TestReconcile(t *testing.T) { }, }), WithSelectorResolver(fakeSelectorResolver{ - resourceSelectorFn: func(ctx context.Context, u *v1alpha1.Usage) error { + resourceSelectorFn: func(_ context.Context, _ 
*v1alpha1.Usage) error { return nil }, }), @@ -201,13 +203,13 @@ func TestReconcile(t *testing.T) { } return nil }), - MockUpdate: test.NewMockUpdateFn(nil, func(obj client.Object) error { + MockUpdate: test.NewMockUpdateFn(nil, func(_ client.Object) error { return nil }), }, }), WithSelectorResolver(fakeSelectorResolver{ - resourceSelectorFn: func(ctx context.Context, u *v1alpha1.Usage) error { + resourceSelectorFn: func(_ context.Context, _ *v1alpha1.Usage) error { return nil }, }), @@ -245,7 +247,7 @@ func TestReconcile(t *testing.T) { }, }), WithSelectorResolver(fakeSelectorResolver{ - resourceSelectorFn: func(ctx context.Context, u *v1alpha1.Usage) error { + resourceSelectorFn: func(_ context.Context, _ *v1alpha1.Usage) error { return nil }, }), @@ -283,7 +285,7 @@ func TestReconcile(t *testing.T) { }, }), WithSelectorResolver(fakeSelectorResolver{ - resourceSelectorFn: func(ctx context.Context, u *v1alpha1.Usage) error { + resourceSelectorFn: func(_ context.Context, _ *v1alpha1.Usage) error { return nil }, }), @@ -332,7 +334,7 @@ func TestReconcile(t *testing.T) { }, }), WithSelectorResolver(fakeSelectorResolver{ - resourceSelectorFn: func(ctx context.Context, u *v1alpha1.Usage) error { + resourceSelectorFn: func(_ context.Context, _ *v1alpha1.Usage) error { return nil }, }), @@ -391,7 +393,7 @@ func TestReconcile(t *testing.T) { }, }), WithSelectorResolver(fakeSelectorResolver{ - resourceSelectorFn: func(ctx context.Context, u *v1alpha1.Usage) error { + resourceSelectorFn: func(_ context.Context, _ *v1alpha1.Usage) error { return nil }, }), @@ -440,7 +442,7 @@ func TestReconcile(t *testing.T) { }, }), WithSelectorResolver(fakeSelectorResolver{ - resourceSelectorFn: func(ctx context.Context, u *v1alpha1.Usage) error { + resourceSelectorFn: func(_ context.Context, _ *v1alpha1.Usage) error { return nil }, }), @@ -474,7 +476,7 @@ func TestReconcile(t *testing.T) { }, }), WithSelectorResolver(fakeSelectorResolver{ - resourceSelectorFn: func(ctx context.Context, u 
*v1alpha1.Usage) error { + resourceSelectorFn: func(_ context.Context, _ *v1alpha1.Usage) error { return nil }, }), @@ -508,7 +510,7 @@ func TestReconcile(t *testing.T) { }, }), WithSelectorResolver(fakeSelectorResolver{ - resourceSelectorFn: func(ctx context.Context, u *v1alpha1.Usage) error { + resourceSelectorFn: func(_ context.Context, _ *v1alpha1.Usage) error { return nil }, }), @@ -550,7 +552,7 @@ func TestReconcile(t *testing.T) { }, }), WithSelectorResolver(fakeSelectorResolver{ - resourceSelectorFn: func(ctx context.Context, u *v1alpha1.Usage) error { + resourceSelectorFn: func(_ context.Context, _ *v1alpha1.Usage) error { return nil }, }), @@ -582,13 +584,13 @@ func TestReconcile(t *testing.T) { } return errors.New("unexpected object type") }), - MockList: test.NewMockListFn(nil, func(obj client.ObjectList) error { + MockList: test.NewMockListFn(nil, func(_ client.ObjectList) error { return errBoom }), }, }), WithSelectorResolver(fakeSelectorResolver{ - resourceSelectorFn: func(ctx context.Context, u *v1alpha1.Usage) error { + resourceSelectorFn: func(_ context.Context, _ *v1alpha1.Usage) error { return nil }, }), @@ -620,16 +622,16 @@ func TestReconcile(t *testing.T) { } return errors.New("unexpected object type") }), - MockList: test.NewMockListFn(nil, func(obj client.ObjectList) error { + MockList: test.NewMockListFn(nil, func(_ client.ObjectList) error { return nil }), - MockUpdate: test.NewMockUpdateFn(nil, func(obj client.Object) error { + MockUpdate: test.NewMockUpdateFn(nil, func(_ client.Object) error { return errBoom }), }, }), WithSelectorResolver(fakeSelectorResolver{ - resourceSelectorFn: func(ctx context.Context, u *v1alpha1.Usage) error { + resourceSelectorFn: func(_ context.Context, _ *v1alpha1.Usage) error { return nil }, }), @@ -663,7 +665,7 @@ func TestReconcile(t *testing.T) { }, }), WithSelectorResolver(fakeSelectorResolver{ - resourceSelectorFn: func(ctx context.Context, u *v1alpha1.Usage) error { + resourceSelectorFn: func(_ 
context.Context, _ *v1alpha1.Usage) error { return nil }, }), @@ -695,7 +697,56 @@ func TestReconcile(t *testing.T) { } return errors.New("unexpected object type") }), - MockList: test.NewMockListFn(nil, func(obj client.ObjectList) error { + MockList: test.NewMockListFn(nil, func(_ client.ObjectList) error { + return nil + }), + MockUpdate: test.NewMockUpdateFn(nil, func(obj client.Object) error { + if o, ok := obj.(*composed.Unstructured); ok { + if o.GetLabels()[inUseLabelKey] != "" { + t.Errorf("expected in use label to be removed") + } + return nil + } + return errors.New("unexpected object type") + }), + }, + }), + WithSelectorResolver(fakeSelectorResolver{ + resourceSelectorFn: func(_ context.Context, _ *v1alpha1.Usage) error { + return nil + }, + }), + WithFinalizer(xpresource.FinalizerFns{RemoveFinalizerFn: func(_ context.Context, _ xpresource.Object) error { + return nil + }}), + }, + }, + want: want{ + r: reconcile.Result{}, + }, + }, + "SuccessfulDeleteWithReplayDeletion": { + reason: "We should replay deletion after usage is gone and replayDeletion is true.", + args: args{ + mgr: &fake.Manager{}, + opts: []ReconcilerOption{ + WithClientApplicator(xpresource.ClientApplicator{ + Client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + if o, ok := obj.(*v1alpha1.Usage); ok { + o.SetDeletionTimestamp(&now) + o.Spec.ReplayDeletion = ptr.To(true) + o.Spec.Of.ResourceRef = &v1alpha1.ResourceRef{Name: "cool"} + return nil + } + if o, ok := obj.(*composed.Unstructured); ok { + o.SetAnnotations(map[string]string{usage.AnnotationKeyDeletionAttempt: string(metav1.DeletePropagationBackground)}) + o.SetLabels(map[string]string{inUseLabelKey: "true"}) + return nil + } + return errors.New("unexpected object type") + }), + MockList: test.NewMockListFn(nil, func(_ client.ObjectList) error { return nil }), MockUpdate: test.NewMockUpdateFn(nil, func(obj client.Object) error { @@ -707,10 +758,13 @@ func TestReconcile(t *testing.T) { } 
return errors.New("unexpected object type") }), + MockDelete: func(_ context.Context, _ client.Object, _ ...client.DeleteOption) error { + return nil + }, }, }), WithSelectorResolver(fakeSelectorResolver{ - resourceSelectorFn: func(ctx context.Context, u *v1alpha1.Usage) error { + resourceSelectorFn: func(_ context.Context, _ *v1alpha1.Usage) error { return nil }, }), @@ -753,7 +807,7 @@ func TestReconcile(t *testing.T) { } return errors.New("unexpected object type") }), - MockList: test.NewMockListFn(nil, func(obj client.ObjectList) error { + MockList: test.NewMockListFn(nil, func(_ client.ObjectList) error { return nil }), MockUpdate: test.NewMockUpdateFn(nil, func(obj client.Object) error { @@ -768,7 +822,7 @@ func TestReconcile(t *testing.T) { }, }), WithSelectorResolver(fakeSelectorResolver{ - resourceSelectorFn: func(ctx context.Context, u *v1alpha1.Usage) error { + resourceSelectorFn: func(_ context.Context, _ *v1alpha1.Usage) error { return nil }, }), diff --git a/internal/controller/apiextensions/usage/selector_test.go b/internal/controller/apiextensions/usage/selector_test.go index 5087a827c..e6ce4bfb7 100644 --- a/internal/controller/apiextensions/usage/selector_test.go +++ b/internal/controller/apiextensions/usage/selector_test.go @@ -155,7 +155,7 @@ func TestResolveSelectors(t *testing.T) { reason: "We should return error if we cannot list the used resources.", args: args{ client: &test.MockClient{ - MockList: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + MockList: func(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error { return errBoom }, }, @@ -181,7 +181,7 @@ func TestResolveSelectors(t *testing.T) { reason: "We should return error if we cannot list the using resources.", args: args{ client: &test.MockClient{ - MockList: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + MockList: func(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error 
{ return errBoom }, }, @@ -214,7 +214,7 @@ func TestResolveSelectors(t *testing.T) { reason: "We should return error if we cannot update the usage after resolving used resource.", args: args{ client: &test.MockClient{ - MockList: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { l := list.(*composed.UnstructuredList) switch l.GetKind() { case "SomeKindList": @@ -234,7 +234,7 @@ func TestResolveSelectors(t *testing.T) { } return nil }, - MockUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + MockUpdate: func(_ context.Context, _ client.Object, _ ...client.UpdateOption) error { return errBoom }, }, @@ -260,7 +260,7 @@ func TestResolveSelectors(t *testing.T) { reason: "We should return error if we cannot update the usage after resolving using resource.", args: args{ client: &test.MockClient{ - MockList: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { l := list.(*composed.UnstructuredList) switch l.GetKind() { case "AnotherKindList": @@ -280,7 +280,7 @@ func TestResolveSelectors(t *testing.T) { } return nil }, - MockUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + MockUpdate: func(_ context.Context, _ client.Object, _ ...client.UpdateOption) error { return errBoom }, }, @@ -313,7 +313,7 @@ func TestResolveSelectors(t *testing.T) { reason: "We should return error if there are no matching resources.", args: args{ client: &test.MockClient{ - MockList: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + MockList: func(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error { return nil }, }, @@ -340,7 +340,7 @@ func TestResolveSelectors(t *testing.T) { reason: "We should return error if 
there are no matching resources with controller ref.", args: args{ client: &test.MockClient{ - MockList: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { l := list.(*composed.UnstructuredList) switch l.GetKind() { case "SomeKindList": @@ -360,7 +360,7 @@ func TestResolveSelectors(t *testing.T) { } return nil }, - MockUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + MockUpdate: func(_ context.Context, _ client.Object, _ ...client.UpdateOption) error { return nil }, }, @@ -387,7 +387,7 @@ func TestResolveSelectors(t *testing.T) { reason: "If selectors defined for both \"of\" and \"by\", both should be resolved.", args: args{ client: &test.MockClient{ - MockList: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + MockList: func(_ context.Context, list client.ObjectList, opts ...client.ListOption) error { l := list.(*composed.UnstructuredList) if v := l.GroupVersionKind().Version; v != "v1" { t.Errorf("unexpected list version: %s", v) @@ -437,7 +437,7 @@ func TestResolveSelectors(t *testing.T) { } return nil }, - MockUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + MockUpdate: func(_ context.Context, _ client.Object, _ ...client.UpdateOption) error { return nil }, }, diff --git a/internal/controller/pkg/manager/reconciler.go b/internal/controller/pkg/manager/reconciler.go index 6b11fd263..0b4e6c055 100644 --- a/internal/controller/pkg/manager/reconciler.go +++ b/internal/controller/pkg/manager/reconciler.go @@ -41,7 +41,6 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" v1 "github.com/crossplane/crossplane/apis/pkg/v1" - "github.com/crossplane/crossplane/apis/pkg/v1beta1" "github.com/crossplane/crossplane/internal/controller/pkg/controller" "github.com/crossplane/crossplane/internal/xpkg" ) @@ -217,10 
+216,10 @@ func SetupConfiguration(mgr ctrl.Manager, o controller.Options) error { // SetupFunction adds a controller that reconciles Functions. func SetupFunction(mgr ctrl.Manager, o controller.Options) error { - name := "packages/" + strings.ToLower(v1beta1.FunctionGroupKind) - np := func() v1.Package { return &v1beta1.Function{} } - nr := func() v1.PackageRevision { return &v1beta1.FunctionRevision{} } - nrl := func() v1.PackageRevisionList { return &v1beta1.FunctionRevisionList{} } + name := "packages/" + strings.ToLower(v1.FunctionGroupKind) + np := func() v1.Package { return &v1.Function{} } + nr := func() v1.PackageRevision { return &v1.FunctionRevision{} } + nrl := func() v1.PackageRevisionList { return &v1.FunctionRevisionList{} } cs, err := kubernetes.NewForConfig(mgr.GetConfig()) if err != nil { @@ -242,8 +241,8 @@ func SetupFunction(mgr ctrl.Manager, o controller.Options) error { return ctrl.NewControllerManagedBy(mgr). Named(name). - For(&v1beta1.Function{}). - Owns(&v1beta1.FunctionRevision{}). + For(&v1.Function{}). + Owns(&v1.FunctionRevision{}). WithOptions(o.ForControllerRuntime()). Complete(ratelimiter.NewReconciler(name, errors.WithSilentRequeueOnConflict(NewReconciler(mgr, opts...)), o.GlobalRateLimiter)) } @@ -268,7 +267,7 @@ func NewReconciler(mgr ctrl.Manager, opts ...ReconcilerOption) *Reconciler { } // Reconcile package. -func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocyclo // Reconcilers are complex. Be wary of adding more. +func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocognit // Reconcilers are complex. Be wary of adding more. 
log := r.log.WithValues("request", req) log.Debug("Reconciling") @@ -418,12 +417,13 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco pr.SetSkipDependencyResolution(p.GetSkipDependencyResolution()) pr.SetCommonLabels(p.GetCommonLabels()) - if pwr, ok := p.(v1.PackageWithRuntime); ok { - pwrr := pr.(v1.PackageRevisionWithRuntime) - pwrr.SetRuntimeConfigRef(pwr.GetRuntimeConfigRef()) - pwrr.SetControllerConfigRef(pwr.GetControllerConfigRef()) - pwrr.SetTLSServerSecretName(pwr.GetTLSServerSecretName()) - pwrr.SetTLSClientSecretName(pwr.GetTLSClientSecretName()) + pwr, pwok := p.(v1.PackageWithRuntime) + prwr, prok := pr.(v1.PackageRevisionWithRuntime) + if pwok && prok { + prwr.SetRuntimeConfigRef(pwr.GetRuntimeConfigRef()) + prwr.SetControllerConfigRef(pwr.GetControllerConfigRef()) + prwr.SetTLSServerSecretName(pwr.GetTLSServerSecretName()) + prwr.SetTLSClientSecretName(pwr.GetTLSClientSecretName()) } // If current revision is not active, and we have an automatic or diff --git a/internal/controller/pkg/manager/reconciler_test.go b/internal/controller/pkg/manager/reconciler_test.go index 6f5ce175f..2313bb1d7 100644 --- a/internal/controller/pkg/manager/reconciler_test.go +++ b/internal/controller/pkg/manager/reconciler_test.go @@ -53,6 +53,7 @@ func NewMockRevisionFn(hash string, err error) func() (string, error) { return hash, err } } + func (m *MockRevisioner) Revision(context.Context, v1.Package) (string, error) { return m.MockRevision() } @@ -489,7 +490,7 @@ func TestReconcile(t *testing.T) { return nil }), }, - Applicator: resource.ApplyFn(func(_ context.Context, o client.Object, _ ...resource.ApplyOption) error { + Applicator: resource.ApplyFn(func(_ context.Context, _ client.Object, _ ...resource.ApplyOption) error { return errBoom }), }, diff --git a/internal/controller/pkg/manager/revisioner.go b/internal/controller/pkg/manager/revisioner.go index 4e5f90b84..7ed09cf8a 100644 --- a/internal/controller/pkg/manager/revisioner.go 
+++ b/internal/controller/pkg/manager/revisioner.go @@ -35,7 +35,7 @@ const ( // Revisioner extracts a revision name for a package source. type Revisioner interface { - Revision(context.Context, v1.Package) (string, error) + Revision(ctx context.Context, p v1.Package) (string, error) } // PackageRevisioner extracts a revision name for a package source. diff --git a/internal/controller/pkg/pkg.go b/internal/controller/pkg/pkg.go index e6362997f..77cb46da0 100644 --- a/internal/controller/pkg/pkg.go +++ b/internal/controller/pkg/pkg.go @@ -24,7 +24,6 @@ import ( "github.com/crossplane/crossplane/internal/controller/pkg/manager" "github.com/crossplane/crossplane/internal/controller/pkg/resolver" "github.com/crossplane/crossplane/internal/controller/pkg/revision" - "github.com/crossplane/crossplane/internal/features" ) // Setup package controllers. @@ -32,26 +31,16 @@ func Setup(mgr ctrl.Manager, o controller.Options) error { for _, setup := range []func(ctrl.Manager, controller.Options) error{ manager.SetupConfiguration, manager.SetupProvider, + manager.SetupFunction, resolver.Setup, revision.SetupConfigurationRevision, revision.SetupProviderRevision, + revision.SetupFunctionRevision, } { if err := setup(mgr, o); err != nil { return err } } - // We only want to start the Function controllers if Functions are enabled. 
- if o.Features.Enabled(features.EnableBetaCompositionFunctions) { - for _, setup := range []func(ctrl.Manager, controller.Options) error{ - manager.SetupFunction, - revision.SetupFunctionRevision, - } { - if err := setup(mgr, o); err != nil { - return err - } - } - } - return nil } diff --git a/internal/controller/pkg/resolver/reconciler.go b/internal/controller/pkg/resolver/reconciler.go index ea94c666b..c0bc15f9d 100644 --- a/internal/controller/pkg/resolver/reconciler.go +++ b/internal/controller/pkg/resolver/reconciler.go @@ -163,7 +163,7 @@ func NewReconciler(mgr manager.Manager, opts ...ReconcilerOption) *Reconciler { } // Reconcile package revision. -func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocyclo // Reconcilers are complex. Be wary of adding more. +func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { log := r.log.WithValues("request", req) log.Debug("Reconciling") @@ -287,7 +287,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco case v1beta1.ProviderPackageType: pack = &v1.Provider{} case v1beta1.FunctionPackageType: - pack = &v1beta1.Function{} + pack = &v1.Function{} default: log.Debug(errInvalidPackageType) return reconcile.Result{Requeue: false}, nil diff --git a/internal/controller/pkg/resolver/reconciler_test.go b/internal/controller/pkg/resolver/reconciler_test.go index bce8eb97c..b8cfa7efd 100644 --- a/internal/controller/pkg/resolver/reconciler_test.go +++ b/internal/controller/pkg/resolver/reconciler_test.go @@ -165,7 +165,7 @@ func TestReconcile(t *testing.T) { rec: []ReconcilerOption{ WithNewDagFn(func() dag.DAG { return &fakedag.MockDag{ - MockInit: func(nodes []dag.Node) ([]dag.Node, error) { + MockInit: func(_ []dag.Node) ([]dag.Node, error) { return nil, errBoom }, } @@ -201,7 +201,7 @@ func TestReconcile(t *testing.T) { rec: []ReconcilerOption{ WithNewDagFn(func() dag.DAG { return 
&fakedag.MockDag{ - MockInit: func(nodes []dag.Node) ([]dag.Node, error) { + MockInit: func(_ []dag.Node) ([]dag.Node, error) { return nil, nil }, MockSort: func() ([]string, error) { @@ -240,7 +240,7 @@ func TestReconcile(t *testing.T) { rec: []ReconcilerOption{ WithNewDagFn(func() dag.DAG { return &fakedag.MockDag{ - MockInit: func(nodes []dag.Node) ([]dag.Node, error) { + MockInit: func(_ []dag.Node) ([]dag.Node, error) { return nil, nil }, MockSort: func() ([]string, error) { @@ -279,7 +279,7 @@ func TestReconcile(t *testing.T) { rec: []ReconcilerOption{ WithNewDagFn(func() dag.DAG { return &fakedag.MockDag{ - MockInit: func(nodes []dag.Node) ([]dag.Node, error) { + MockInit: func(_ []dag.Node) ([]dag.Node, error) { return []dag.Node{ &v1beta1.Dependency{ Package: "not.a.valid.package", @@ -322,7 +322,7 @@ func TestReconcile(t *testing.T) { rec: []ReconcilerOption{ WithNewDagFn(func() dag.DAG { return &fakedag.MockDag{ - MockInit: func(nodes []dag.Node) ([]dag.Node, error) { + MockInit: func(_ []dag.Node) ([]dag.Node, error) { return []dag.Node{ &v1beta1.Dependency{ Package: "hasheddan/config-nop-b", @@ -369,7 +369,7 @@ func TestReconcile(t *testing.T) { rec: []ReconcilerOption{ WithNewDagFn(func() dag.DAG { return &fakedag.MockDag{ - MockInit: func(nodes []dag.Node) ([]dag.Node, error) { + MockInit: func(_ []dag.Node) ([]dag.Node, error) { return []dag.Node{ &v1beta1.Dependency{ Package: "hasheddan/config-nop-b", @@ -417,7 +417,7 @@ func TestReconcile(t *testing.T) { rec: []ReconcilerOption{ WithNewDagFn(func() dag.DAG { return &fakedag.MockDag{ - MockInit: func(nodes []dag.Node) ([]dag.Node, error) { + MockInit: func(_ []dag.Node) ([]dag.Node, error) { return []dag.Node{ &v1beta1.Dependency{ Package: "hasheddan/config-nop-c", @@ -466,7 +466,7 @@ func TestReconcile(t *testing.T) { rec: []ReconcilerOption{ WithNewDagFn(func() dag.DAG { return &fakedag.MockDag{ - MockInit: func(nodes []dag.Node) ([]dag.Node, error) { + MockInit: func(_ []dag.Node) ([]dag.Node, 
error) { return []dag.Node{ &v1beta1.Dependency{ Package: "hasheddan/config-nop-c", diff --git a/internal/controller/pkg/revision/dependency.go b/internal/controller/pkg/revision/dependency.go index 91e439d56..af7b76f0b 100644 --- a/internal/controller/pkg/revision/dependency.go +++ b/internal/controller/pkg/revision/dependency.go @@ -18,6 +18,8 @@ package revision import ( "context" + "fmt" + "strings" "github.com/Masterminds/semver" "github.com/google/go-containerregistry/pkg/name" @@ -41,7 +43,7 @@ const ( errNotMeta = "meta type is not a valid package" errGetOrCreateLock = "cannot get or create lock" errInitDAG = "cannot initialize dependency graph from the packages in the lock" - errFmtIncompatibleDependency = "incompatible dependencies: %+v" + errFmtIncompatibleDependency = "incompatible dependencies: %s" errFmtMissingDependencies = "missing dependencies: %+v" errDependencyNotInGraph = "dependency is not present in graph" errDependencyNotLockPackage = "dependency in graph is not a lock package" @@ -70,7 +72,7 @@ func NewPackageDependencyManager(c client.Client, nd dag.NewDAGFn, t v1beta1.Pac } // Resolve resolves package dependencies. -func (m *PackageDependencyManager) Resolve(ctx context.Context, pkg runtime.Object, pr v1.PackageRevision) (found, installed, invalid int, err error) { //nolint:gocyclo // TODO(negz): Can this be refactored for less complexity? +func (m *PackageDependencyManager) Resolve(ctx context.Context, pkg runtime.Object, pr v1.PackageRevision) (found, installed, invalid int, err error) { //nolint:gocognit // TODO(negz): Can this be refactored for less complexity? // If we are inactive, we don't need to resolve dependencies. 
if pr.GetDesiredState() == v1.PackageRevisionInactive { return 0, 0, 0, nil @@ -135,6 +137,22 @@ func (m *PackageDependencyManager) Resolve(ctx context.Context, pkg runtime.Obje Dependencies: sources, } + // Delete packages in lock with same name and distinct source + // This is a corner case when source is updated but image SHA is not (i.e. relocate same image + // to another registry) + for _, lp := range lock.Packages { + if self.Name == lp.Name && self.Type == lp.Type && self.Source != lp.Identifier() { + if err := m.RemoveSelf(ctx, pr); err != nil { + return found, installed, invalid, err + } + // refresh the lock to be in sync with the contents + if err = m.client.Get(ctx, types.NamespacedName{Name: lockName}, lock); err != nil { + return found, installed, invalid, err + } + break + } + } + prExists := false for _, lp := range lock.Packages { if lp.Name == pr.GetName() { @@ -207,12 +225,16 @@ func (m *PackageDependencyManager) Resolve(ctx context.Context, pkg runtime.Obje return found, installed, invalid, err } if !c.Check(v) { - invalidDeps = append(invalidDeps, lp.Identifier()) + s := fmt.Sprintf("existing package %s@%s", lp.Identifier(), lp.Version) + if dep.Constraints != "" { + s = fmt.Sprintf("%s is incompatible with constraint %s", s, strings.TrimSpace(dep.Constraints)) + } + invalidDeps = append(invalidDeps, s) } } invalid = len(invalidDeps) if invalid > 0 { - return found, installed, invalid, errors.Errorf(errFmtIncompatibleDependency, invalidDeps) + return found, installed, invalid, errors.Errorf(errFmtIncompatibleDependency, strings.Join(invalidDeps, "; ")) } return found, installed, invalid, nil } diff --git a/internal/controller/pkg/revision/dependency_test.go b/internal/controller/pkg/revision/dependency_test.go index 3bfc3d922..f8e79d009 100644 --- a/internal/controller/pkg/revision/dependency_test.go +++ b/internal/controller/pkg/revision/dependency_test.go @@ -42,6 +42,7 @@ var _ DependencyManager = &PackageDependencyManager{} func 
TestResolve(t *testing.T) { errBoom := errors.New("boom") + mockUpdateCallCount := 0 type args struct { dep *PackageDependencyManager @@ -173,7 +174,7 @@ func TestResolve(t *testing.T) { }, newDag: func() dag.DAG { return &dagfake.MockDag{ - MockInit: func(nodes []dag.Node) ([]dag.Node, error) { + MockInit: func(_ []dag.Node) ([]dag.Node, error) { return nil, nil }, MockTraceNode: func(_ string) (map[string]dag.Node, error) { @@ -200,14 +201,14 @@ func TestResolve(t *testing.T) { args: args{ dep: &PackageDependencyManager{ client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + MockGet: test.NewMockGetFn(nil, func(_ client.Object) error { return nil }), MockUpdate: test.NewMockUpdateFn(nil), }, newDag: func() dag.DAG { return &dagfake.MockDag{ - MockInit: func(nodes []dag.Node) ([]dag.Node, error) { + MockInit: func(_ []dag.Node) ([]dag.Node, error) { return nil, nil }, MockNodeExists: func(_ string) bool { @@ -287,7 +288,7 @@ func TestResolve(t *testing.T) { }, newDag: func() dag.DAG { return &dagfake.MockDag{ - MockInit: func(nodes []dag.Node) ([]dag.Node, error) { + MockInit: func(_ []dag.Node) ([]dag.Node, error) { return []dag.Node{ &v1beta1.Dependency{ Package: "not-here-2", @@ -375,7 +376,7 @@ func TestResolve(t *testing.T) { }, newDag: func() dag.DAG { return &dagfake.MockDag{ - MockInit: func(nodes []dag.Node) ([]dag.Node, error) { + MockInit: func(_ []dag.Node) ([]dag.Node, error) { return nil, nil }, MockTraceNode: func(_ string) (map[string]dag.Node, error) { @@ -433,7 +434,7 @@ func TestResolve(t *testing.T) { total: 3, installed: 3, invalid: 2, - err: errors.Errorf(errFmtIncompatibleDependency, []string{"not-here-1", "not-here-2"}), + err: errors.Errorf(errFmtIncompatibleDependency, "existing package not-here-1@v0.0.1 is incompatible with constraint >=v0.1.0; existing package not-here-2@v0.0.1 is incompatible with constraint >=v0.1.0"), }, }, "SuccessfulSelfExistValidDependencies": { @@ -478,10 +479,10 @@ func 
TestResolve(t *testing.T) { }, newDag: func() dag.DAG { return &dagfake.MockDag{ - MockInit: func(nodes []dag.Node) ([]dag.Node, error) { + MockInit: func(_ []dag.Node) ([]dag.Node, error) { return nil, nil }, - MockNodeExists: func(identifier string) bool { + MockNodeExists: func(_ string) bool { return true }, MockTraceNode: func(_ string) (map[string]dag.Node, error) { @@ -553,9 +554,68 @@ func TestResolve(t *testing.T) { invalid: 0, }, }, + "SuccessfulLockPackageSourceMismatch": { + reason: "Should not return error if source in packages does not match provider revision package.", + args: args{ + dep: &PackageDependencyManager{ + client: &test.MockClient{ + MockGet: test.NewMockGetFn(nil, func(obj client.Object) error { + l := obj.(*v1beta1.Lock) + if mockUpdateCallCount < 1 { + l.Packages = []v1beta1.LockPackage{ + { + Name: "config-nop-a-abc123", + // Source mismatch provider revision package + Source: "hasheddan/config-nop-b", + }, + } + } else { + l.Packages = []v1beta1.LockPackage{} + } + return nil + }), + MockUpdate: func(_ context.Context, _ client.Object, _ ...client.UpdateOption) error { + mockUpdateCallCount++ + return nil + }, + }, + newDag: func() dag.DAG { + return &dagfake.MockDag{ + MockInit: func(_ []dag.Node) ([]dag.Node, error) { + return []dag.Node{}, nil + }, + MockTraceNode: func(s string) (map[string]dag.Node, error) { + if s == "hasheddan/config-nop-a" { + return map[string]dag.Node{ + s: &v1beta1.Dependency{}, + }, nil + } + return nil, errors.New("missing node in tree") + }, + MockAddOrUpdateNodes: func(_ ...dag.Node) {}, + } + }, + }, + meta: &pkgmetav1.Configuration{}, + pr: &v1.ConfigurationRevision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "config-nop-a-abc123", + }, + Spec: v1.PackageRevisionSpec{ + Package: "hasheddan/config-nop-a:v0.0.1", + DesiredState: v1.PackageRevisionActive, + }, + }, + }, + want: want{ + total: 1, + installed: 1, + }, + }, } for name, tc := range cases { + mockUpdateCallCount = 0 t.Run(name, func(t
*testing.T) { total, installed, invalid, err := tc.args.dep.Resolve(context.TODO(), tc.args.meta, tc.args.pr) diff --git a/internal/controller/pkg/revision/establisher.go b/internal/controller/pkg/revision/establisher.go index 9760d05fd..bd5f1471f 100644 --- a/internal/controller/pkg/revision/establisher.go +++ b/internal/controller/pkg/revision/establisher.go @@ -128,7 +128,7 @@ func (e *APIEstablisher) Establish(ctx context.Context, objs []runtime.Object, p // ReleaseObjects removes control of owned resources in the API server for a // package revision. -func (e *APIEstablisher) ReleaseObjects(ctx context.Context, parent v1.PackageRevision) error { //nolint:gocyclo // complexity coming from parallelism. +func (e *APIEstablisher) ReleaseObjects(ctx context.Context, parent v1.PackageRevision) error { //nolint:gocognit // complexity coming from parallelism. // Note(turkenh): We rely on status.objectRefs to get the list of objects // that are controlled by the package revision. Relying on the status is // not ideal as it might get lost (e.g. if the status subresource is @@ -145,7 +145,6 @@ func (e *APIEstablisher) ReleaseObjects(ctx context.Context, parent v1.PackageRe g, ctx := errgroup.WithContext(ctx) g.SetLimit(maxConcurrentEstablishers) for _, ref := range allObjs { - ref := ref // Pin the loop variable. g.Go(func() error { select { case <-ctx.Done(): @@ -222,7 +221,7 @@ func (e *APIEstablisher) addLabels(objs []runtime.Object, parent v1.PackageRevis return nil } -func (e *APIEstablisher) validate(ctx context.Context, objs []runtime.Object, parent v1.PackageRevision, control bool) (allObjs []currentDesired, err error) { //nolint:gocyclo // TODO(negz): Refactor this to break up complexity. +func (e *APIEstablisher) validate(ctx context.Context, objs []runtime.Object, parent v1.PackageRevision, control bool) (allObjs []currentDesired, err error) { //nolint:gocognit // TODO(negz): Refactor this to break up complexity. 
var webhookTLSCert []byte if parentWithRuntime, ok := parent.(v1.PackageRevisionWithRuntime); ok && control { webhookTLSCert, err = e.getWebhookTLSCert(ctx, parentWithRuntime) @@ -235,11 +234,10 @@ func (e *APIEstablisher) validate(ctx context.Context, objs []runtime.Object, pa g.SetLimit(maxConcurrentEstablishers) out := make(chan currentDesired, len(objs)) for _, res := range objs { - res := res // Pin the range variable before using it in a Goroutine. g.Go(func() error { // Assert desired object to resource.Object so that we can access its // metadata. - d, ok := res.(resource.Object) + desired, ok := res.(resource.Object) if !ok { return errors.New(errAssertResourceObj) } @@ -257,7 +255,7 @@ func (e *APIEstablisher) validate(ctx context.Context, objs []runtime.Object, pa if !ok { return errors.New(errAssertClientObj) } - err := e.client.Get(ctx, types.NamespacedName{Name: d.GetName(), Namespace: d.GetNamespace()}, current) + err := e.client.Get(ctx, types.NamespacedName{Name: desired.GetName(), Namespace: desired.GetNamespace()}, current) if resource.IgnoreNotFound(err) != nil { return err } @@ -268,26 +266,25 @@ func (e *APIEstablisher) validate(ctx context.Context, objs []runtime.Object, pa // We will not create a resource if we are not going to control it, // so we don't need to check with dry run. if control { - if err := e.create(ctx, d, parent, client.DryRunAll); err != nil { + if err := e.create(ctx, desired, parent, client.DryRunAll); err != nil { return err } } // Add to objects as not existing. select { - case out <- currentDesired{Desired: d, Current: nil, Exists: false}: + case out <- currentDesired{Desired: desired, Current: nil, Exists: false}: return nil case <-ctx.Done(): return ctx.Err() } } - c := current.(resource.Object) - if err := e.update(ctx, c, d, parent, control, client.DryRunAll); err != nil { + if err := e.update(ctx, current, desired, parent, control, client.DryRunAll); err != nil { return err } // Add to objects as existing. 
select { - case out <- currentDesired{Desired: d, Current: c, Exists: true}: + case out <- currentDesired{Desired: desired, Current: current, Exists: true}: return nil case <-ctx.Done(): return ctx.Err() @@ -305,7 +302,7 @@ func (e *APIEstablisher) validate(ctx context.Context, objs []runtime.Object, pa return allObjs, nil } -func (e *APIEstablisher) enrichControlledResource(res runtime.Object, webhookTLSCert []byte, parent v1.PackageRevision) error { //nolint:gocyclo // just a switch +func (e *APIEstablisher) enrichControlledResource(res runtime.Object, webhookTLSCert []byte, parent v1.PackageRevision) error { //nolint:gocognit // just a switch // The generated webhook configurations have a static hard-coded name // that the developers of the providers can't affect. Here, we make sure // to distinguish one from the other by setting the name to the parent @@ -389,12 +386,11 @@ func (e *APIEstablisher) getWebhookTLSCert(ctx context.Context, parentWithRuntim return webhookTLSCert, nil } -func (e *APIEstablisher) establish(ctx context.Context, allObjs []currentDesired, parent client.Object, control bool) ([]xpv1.TypedReference, error) { //nolint:gocyclo // Only slightly over (12). +func (e *APIEstablisher) establish(ctx context.Context, allObjs []currentDesired, parent client.Object, control bool) ([]xpv1.TypedReference, error) { g, ctx := errgroup.WithContext(ctx) g.SetLimit(maxConcurrentEstablishers) out := make(chan xpv1.TypedReference, len(allObjs)) for _, cd := range allObjs { - cd := cd // Pin the loop variable. g.Go(func() error { if !cd.Exists { // Only create a missing resource if we are going to control it. 
diff --git a/internal/controller/pkg/revision/establisher_test.go b/internal/controller/pkg/revision/establisher_test.go index ee12d8c10..d550a896c 100644 --- a/internal/controller/pkg/revision/establisher_test.go +++ b/internal/controller/pkg/revision/establisher_test.go @@ -20,7 +20,6 @@ import ( "context" "testing" - "github.com/aws/smithy-go/ptr" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" admv1 "k8s.io/api/admissionregistration/v1" @@ -32,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" @@ -141,7 +141,7 @@ func TestAPIEstablisherEstablish(t *testing.T) { args: args{ est: &APIEstablisher{ client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, obj client.Object) error { if s, ok := obj.(*corev1.Secret); ok { (&corev1.Secret{ Data: map[string][]byte{ @@ -353,7 +353,7 @@ func TestAPIEstablisherEstablish(t *testing.T) { args: args{ est: &APIEstablisher{ client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, obj client.Object) error { s := &corev1.Secret{} s.DeepCopyInto(obj.(*corev1.Secret)) return nil @@ -378,7 +378,7 @@ func TestAPIEstablisherEstablish(t *testing.T) { args: args{ est: &APIEstablisher{ client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, obj client.Object) error { s := &corev1.Secret{} s.DeepCopyInto(obj.(*corev1.Secret)) return nil @@ -495,7 +495,7 @@ func TestAPIEstablisherReleaseObjects(t *testing.T) { args: args{ est: &APIEstablisher{ client: &test.MockClient{ - MockGet: func(ctx 
context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { return errBoom }, }, @@ -524,7 +524,7 @@ func TestAPIEstablisherReleaseObjects(t *testing.T) { args: args{ est: &APIEstablisher{ client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { return kerrors.NewNotFound(schema.GroupResource{}, "") }, }, @@ -553,7 +553,7 @@ func TestAPIEstablisherReleaseObjects(t *testing.T) { args: args{ est: &APIEstablisher{ client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, obj client.Object) error { o := obj.(*unstructured.Unstructured) o.SetOwnerReferences([]metav1.OwnerReference{ { @@ -573,7 +573,7 @@ func TestAPIEstablisherReleaseObjects(t *testing.T) { }) return nil }, - MockUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + MockUpdate: func(_ context.Context, _ client.Object, _ ...client.UpdateOption) error { return errBoom }, }, @@ -602,10 +602,10 @@ func TestAPIEstablisherReleaseObjects(t *testing.T) { args: args{ est: &APIEstablisher{ client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { return nil }, - MockUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + MockUpdate: func(_ context.Context, _ client.Object, _ ...client.UpdateOption) error { return nil }, }, @@ -625,7 +625,7 @@ func TestAPIEstablisherReleaseObjects(t *testing.T) { args: args{ est: &APIEstablisher{ client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ 
client.ObjectKey, obj client.Object) error { o := obj.(*unstructured.Unstructured) o.SetOwnerReferences([]metav1.OwnerReference{ { @@ -645,7 +645,7 @@ func TestAPIEstablisherReleaseObjects(t *testing.T) { }) return nil }, - MockUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + MockUpdate: func(_ context.Context, _ client.Object, _ ...client.UpdateOption) error { t.Errorf("should not have called update") return nil }, @@ -675,7 +675,7 @@ func TestAPIEstablisherReleaseObjects(t *testing.T) { args: args{ est: &APIEstablisher{ client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, obj client.Object) error { o := obj.(*unstructured.Unstructured) o.SetOwnerReferences([]metav1.OwnerReference{ { @@ -688,7 +688,7 @@ func TestAPIEstablisherReleaseObjects(t *testing.T) { }) return nil }, - MockUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + MockUpdate: func(_ context.Context, obj client.Object, _ ...client.UpdateOption) error { o := obj.(*unstructured.Unstructured) if len(o.GetOwnerReferences()) != 2 { t.Errorf("expected 2 owner references, got %d", len(o.GetOwnerReferences())) @@ -697,7 +697,7 @@ func TestAPIEstablisherReleaseObjects(t *testing.T) { for _, ref := range o.GetOwnerReferences() { if ref.Kind == "ProviderRevision" && ref.UID == "some-unique-uid-2312" { found = true - if ptr.ToBool(ref.Controller) { + if ptr.Deref(ref.Controller, false) { t.Errorf("expected controller to be false, got %t", *ref.Controller) } } @@ -736,7 +736,7 @@ func TestAPIEstablisherReleaseObjects(t *testing.T) { args: args{ est: &APIEstablisher{ client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, obj client.Object) error { o := obj.(*unstructured.Unstructured) 
o.SetOwnerReferences([]metav1.OwnerReference{ { @@ -756,7 +756,7 @@ func TestAPIEstablisherReleaseObjects(t *testing.T) { }) return nil }, - MockUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + MockUpdate: func(_ context.Context, obj client.Object, _ ...client.UpdateOption) error { o := obj.(*unstructured.Unstructured) if len(o.GetOwnerReferences()) != 2 { t.Errorf("expected 2 owner references, got %d", len(o.GetOwnerReferences())) diff --git a/internal/controller/pkg/revision/fuzz_test.go b/internal/controller/pkg/revision/fuzz_test.go index f605419ea..1e83e209e 100644 --- a/internal/controller/pkg/revision/fuzz_test.go +++ b/internal/controller/pkg/revision/fuzz_test.go @@ -69,16 +69,16 @@ func newFuzzDag(ff *fuzz.ConsumeFuzzer) (func() dag.DAG, error) { } return func() dag.DAG { return &dagfake.MockDag{ - MockInit: func(nodes []dag.Node) ([]dag.Node, error) { + MockInit: func(_ []dag.Node) ([]dag.Node, error) { return nil, nil }, - MockNodeExists: func(identifier string) bool { + MockNodeExists: func(_ string) bool { return true }, MockTraceNode: func(_ string) (map[string]dag.Node, error) { return traceNodeMap, nil }, - MockGetNode: func(s string) (dag.Node, error) { + MockGetNode: func(_ string) (dag.Node, error) { return lp, nil }, } @@ -102,7 +102,7 @@ func getFuzzMockClient(ff *fuzz.ConsumeFuzzer) (*test.MockClient, error) { } func FuzzRevisionControllerPackageHandling(f *testing.F) { - f.Fuzz(func(t *testing.T, data, revisionData []byte) { + f.Fuzz(func(_ *testing.T, data, revisionData []byte) { ff := fuzz.NewConsumer(revisionData) p := parser.New(metaScheme, objScheme) r := io.NopCloser(bytes.NewReader(data)) diff --git a/internal/controller/pkg/revision/imageback.go b/internal/controller/pkg/revision/imageback.go index 3f886c4c8..81500cf81 100644 --- a/internal/controller/pkg/revision/imageback.go +++ b/internal/controller/pkg/revision/imageback.go @@ -80,7 +80,7 @@ func NewImageBackend(fetcher xpkg.Fetcher, 
opts ...ImageBackendOption) *ImageBac } // Init initializes an ImageBackend. -func (i *ImageBackend) Init(ctx context.Context, bo ...parser.BackendOption) (io.ReadCloser, error) { //nolint:gocyclo // TODO(negz): Can this be made less complex? +func (i *ImageBackend) Init(ctx context.Context, bo ...parser.BackendOption) (io.ReadCloser, error) { // NOTE(hasheddan): we use nestedBackend here because simultaneous // reconciles of providers or configurations can lead to the package // revision being overwritten mid-execution in the shared image backend when diff --git a/internal/controller/pkg/revision/reconciler.go b/internal/controller/pkg/revision/reconciler.go index 9f1ac3f04..1d6d04dcd 100644 --- a/internal/controller/pkg/revision/reconciler.go +++ b/internal/controller/pkg/revision/reconciler.go @@ -46,7 +46,6 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/resource" pkgmetav1 "github.com/crossplane/crossplane/apis/pkg/meta/v1" - pkgmetav1beta1 "github.com/crossplane/crossplane/apis/pkg/meta/v1beta1" v1 "github.com/crossplane/crossplane/apis/pkg/v1" "github.com/crossplane/crossplane/apis/pkg/v1alpha1" "github.com/crossplane/crossplane/apis/pkg/v1beta1" @@ -59,7 +58,7 @@ import ( const ( reconcileTimeout = 3 * time.Minute - // the max size of a package parsed by the parser + // the max size of a package parsed by the parser. maxPackageSize = 200 << 20 // 100 MB ) @@ -381,8 +380,8 @@ func SetupConfigurationRevision(mgr ctrl.Manager, o controller.Options) error { // SetupFunctionRevision adds a controller that reconciles FunctionRevisions. 
func SetupFunctionRevision(mgr ctrl.Manager, o controller.Options) error { - name := "packages/" + strings.ToLower(v1beta1.FunctionRevisionGroupKind) - nr := func() v1.PackageRevision { return &v1beta1.FunctionRevision{} } + name := "packages/" + strings.ToLower(v1.FunctionRevisionGroupKind) + nr := func() v1.PackageRevision { return &v1.FunctionRevision{} } clientset, err := kubernetes.NewForConfig(mgr.GetConfig()) if err != nil { @@ -404,7 +403,7 @@ func SetupFunctionRevision(mgr ctrl.Manager, o controller.Options) error { cb := ctrl.NewControllerManagedBy(mgr). Named(name). - For(&v1beta1.FunctionRevision{}). + For(&v1.FunctionRevision{}). Owns(&appsv1.Deployment{}). Owns(&corev1.Service{}). Owns(&corev1.Secret{}). @@ -444,7 +443,6 @@ func SetupFunctionRevision(mgr ctrl.Manager, o controller.Options) error { // NewReconciler creates a new package revision reconciler. func NewReconciler(mgr manager.Manager, opts ...ReconcilerOption) *Reconciler { - r := &Reconciler{ client: mgr.GetClient(), cache: xpkg.NewNopCache(), @@ -465,7 +463,7 @@ func NewReconciler(mgr manager.Manager, opts ...ReconcilerOption) *Reconciler { } // Reconcile package revision. -func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocyclo // Reconcilers are often very complex. +func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocognit // Reconcilers are often very complex. 
log := r.log.WithValues("request", req) log.Debug("Reconciling") @@ -549,9 +547,8 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco } var runtimeManifestBuilder ManifestBuilder - if r.runtimeHook != nil { - pwr := pr.(v1.PackageRevisionWithRuntime) - + pwr, hasRuntime := pr.(v1.PackageRevisionWithRuntime) + if hasRuntime && r.runtimeHook != nil { opts, err := r.runtimeManifestBuilderOptions(ctx, pwr) if err != nil { log.Debug(errManifestBuilderOptions, "error", err) @@ -742,9 +739,9 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco return reconcile.Result{}, err } - pkgMeta, _ := xpkg.TryConvert(pkg.GetMeta()[0], &pkgmetav1.Provider{}, &pkgmetav1.Configuration{}, &pkgmetav1beta1.Function{}) + pkgMeta, _ := xpkg.TryConvert(pkg.GetMeta()[0], &pkgmetav1.Provider{}, &pkgmetav1.Configuration{}, &pkgmetav1.Function{}) - pmo := pkgMeta.(metav1.Object) + pmo := pkgMeta.(metav1.Object) //nolint:forcetypeassert // Will always be metav1.Object. 
meta.AddLabels(pr, pmo.GetLabels()) meta.AddAnnotations(pr, pmo.GetAnnotations()) if err := r.client.Update(ctx, pr); err != nil { @@ -797,8 +794,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco } } - if r.runtimeHook != nil { - pwr := pr.(v1.PackageRevisionWithRuntime) + if hasRuntime && r.runtimeHook != nil { if err := r.runtimeHook.Pre(ctx, pkgMeta, pwr, runtimeManifestBuilder); err != nil { if kerrors.IsConflict(err) { return reconcile.Result{Requeue: true}, nil @@ -847,8 +843,8 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco }) pr.SetObjects(refs) - if r.runtimeHook != nil { - if err := r.runtimeHook.Post(ctx, pkgMeta, pr.(v1.PackageRevisionWithRuntime), runtimeManifestBuilder); err != nil { + if hasRuntime && r.runtimeHook != nil { + if err := r.runtimeHook.Post(ctx, pkgMeta, pwr, runtimeManifestBuilder); err != nil { if kerrors.IsConflict(err) { return reconcile.Result{Requeue: true}, nil } @@ -883,12 +879,13 @@ func (r *Reconciler) deactivateRevision(ctx context.Context, pr v1.PackageRevisi return errors.Wrap(err, errReleaseObjects) } - if r.runtimeHook == nil { + prwr, ok := pr.(v1.PackageRevisionWithRuntime) + if !ok || r.runtimeHook == nil { return nil } // Call deactivation hook. 
- if err := r.runtimeHook.Deactivate(ctx, pr.(v1.PackageRevisionWithRuntime), runtimeManifestBuilder); err != nil { + if err := r.runtimeHook.Deactivate(ctx, prwr, runtimeManifestBuilder); err != nil { return errors.Wrap(err, errDeactivationHook) } diff --git a/internal/controller/pkg/revision/reconciler_test.go b/internal/controller/pkg/revision/reconciler_test.go index 2350edc5f..ea7e2636e 100644 --- a/internal/controller/pkg/revision/reconciler_test.go +++ b/internal/controller/pkg/revision/reconciler_test.go @@ -274,7 +274,7 @@ func TestReconcile(t *testing.T) { pr.SetDeletionTimestamp(&now) return nil }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(_ client.Object) error { t.Errorf("StatusUpdate should not be called") return nil }), @@ -380,7 +380,7 @@ func TestReconcile(t *testing.T) { pr.SetDesiredState(v1.PackageRevisionActive) return nil }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(_ client.Object) error { t.Errorf("StatusUpdate should not be called") return nil }), @@ -415,7 +415,7 @@ func TestReconcile(t *testing.T) { pr.SetDesiredState(v1.PackageRevisionActive) return nil }), - MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(o client.Object) error { + MockStatusUpdate: test.NewMockSubResourceUpdateFn(nil, func(_ client.Object) error { t.Errorf("StatusUpdate should not be called") return nil }), @@ -732,7 +732,7 @@ func TestReconcile(t *testing.T) { WithLinter(&MockLinter{MockLint: NewMockLintFn(errBoom)}), WithCache(&xpkgfake.MockCache{ MockHas: xpkgfake.NewMockCacheHasFn(false), - MockStore: func(s string, rc io.ReadCloser) error { + MockStore: func(_ string, rc io.ReadCloser) error { _, err := io.ReadAll(rc) return err }, @@ -789,7 +789,7 @@ func TestReconcile(t *testing.T) { 
WithParserBackend(parser.NewEchoBackend(string(providerBytes))), WithCache(&xpkgfake.MockCache{ MockHas: xpkgfake.NewMockCacheHasFn(false), - MockStore: func(s string, rc io.ReadCloser) error { + MockStore: func(_ string, rc io.ReadCloser) error { _, err := io.ReadAll(rc) return err }, @@ -886,7 +886,7 @@ func TestReconcile(t *testing.T) { WithParserBackend(parser.NewEchoBackend(string(providerBytes))), WithCache(&xpkgfake.MockCache{ MockHas: xpkgfake.NewMockCacheHasFn(false), - MockStore: func(s string, rc io.ReadCloser) error { + MockStore: func(_ string, rc io.ReadCloser) error { _, err := io.ReadAll(rc) return err }, @@ -950,7 +950,7 @@ func TestReconcile(t *testing.T) { WithParserBackend(parser.NewEchoBackend(string(providerBytes))), WithCache(&xpkgfake.MockCache{ MockHas: xpkgfake.NewMockCacheHasFn(false), - MockStore: func(s string, rc io.ReadCloser) error { + MockStore: func(_ string, rc io.ReadCloser) error { _, err := io.ReadAll(rc) return err }, @@ -1016,7 +1016,7 @@ func TestReconcile(t *testing.T) { WithParserBackend(parser.NewEchoBackend(string(providerBytes))), WithCache(&xpkgfake.MockCache{ MockHas: xpkgfake.NewMockCacheHasFn(false), - MockStore: func(s string, rc io.ReadCloser) error { + MockStore: func(_ string, rc io.ReadCloser) error { _, err := io.ReadAll(rc) return err }, @@ -1084,7 +1084,7 @@ func TestReconcile(t *testing.T) { WithParserBackend(parser.NewEchoBackend(string(providerBytes))), WithCache(&xpkgfake.MockCache{ MockHas: xpkgfake.NewMockCacheHasFn(false), - MockStore: func(s string, rc io.ReadCloser) error { + MockStore: func(_ string, rc io.ReadCloser) error { _, err := io.ReadAll(rc) return err }, @@ -1146,7 +1146,7 @@ func TestReconcile(t *testing.T) { WithParserBackend(parser.NewEchoBackend(string(providerBytes))), WithCache(&xpkgfake.MockCache{ MockHas: xpkgfake.NewMockCacheHasFn(false), - MockStore: func(s string, rc io.ReadCloser) error { + MockStore: func(_ string, rc io.ReadCloser) error { _, err := io.ReadAll(rc) return err 
}, @@ -1212,7 +1212,7 @@ func TestReconcile(t *testing.T) { WithParserBackend(parser.NewEchoBackend(string(providerBytes))), WithCache(&xpkgfake.MockCache{ MockHas: xpkgfake.NewMockCacheHasFn(false), - MockStore: func(s string, rc io.ReadCloser) error { + MockStore: func(_ string, rc io.ReadCloser) error { _, err := io.ReadAll(rc) return err }, @@ -1275,7 +1275,7 @@ func TestReconcile(t *testing.T) { WithParserBackend(parser.NewEchoBackend(string(providerBytes))), WithCache(&xpkgfake.MockCache{ MockHas: xpkgfake.NewMockCacheHasFn(false), - MockStore: func(s string, rc io.ReadCloser) error { + MockStore: func(_ string, rc io.ReadCloser) error { _, err := io.ReadAll(rc) return err }, @@ -1407,7 +1407,7 @@ func TestReconcile(t *testing.T) { WithParserBackend(parser.NewEchoBackend(string(providerBytes))), WithCache(&xpkgfake.MockCache{ MockHas: xpkgfake.NewMockCacheHasFn(false), - MockStore: func(s string, rc io.ReadCloser) error { + MockStore: func(_ string, rc io.ReadCloser) error { _, err := io.ReadAll(rc) return err }, @@ -1525,7 +1525,7 @@ func TestReconcile(t *testing.T) { WithParserBackend(parser.NewEchoBackend(string(providerBytes))), WithCache(&xpkgfake.MockCache{ MockHas: xpkgfake.NewMockCacheHasFn(false), - MockStore: func(s string, rc io.ReadCloser) error { + MockStore: func(_ string, rc io.ReadCloser) error { _, err := io.ReadAll(rc) return err }, @@ -1589,7 +1589,7 @@ func TestReconcile(t *testing.T) { WithParserBackend(parser.NewEchoBackend(string(providerBytes))), WithCache(&xpkgfake.MockCache{ MockHas: xpkgfake.NewMockCacheHasFn(false), - MockStore: func(s string, rc io.ReadCloser) error { + MockStore: func(_ string, rc io.ReadCloser) error { _, err := io.ReadAll(rc) return err }, diff --git a/internal/controller/pkg/revision/runtime.go b/internal/controller/pkg/revision/runtime.go index 67e272252..e04c99a03 100644 --- a/internal/controller/pkg/revision/runtime.go +++ b/internal/controller/pkg/revision/runtime.go @@ -58,6 +58,7 @@ const ( 
tlsClientCertsDir = "/tls/client" ) +//nolint:gochecknoglobals // We treat these as constants, but take their addresses. var ( runAsUser = int64(2000) runAsGroup = int64(2000) @@ -84,13 +85,13 @@ type ManifestBuilder interface { // establishes objects. type RuntimeHooks interface { // Pre performs operations meant to happen before establishing objects. - Pre(context.Context, runtime.Object, v1.PackageRevisionWithRuntime, ManifestBuilder) error + Pre(ctx context.Context, obj runtime.Object, pr v1.PackageRevisionWithRuntime, b ManifestBuilder) error // Post performs operations meant to happen after establishing objects. - Post(context.Context, runtime.Object, v1.PackageRevisionWithRuntime, ManifestBuilder) error + Post(ctx context.Context, obj runtime.Object, pr v1.PackageRevisionWithRuntime, b ManifestBuilder) error // Deactivate performs operations meant to happen before deactivating a revision. - Deactivate(context.Context, v1.PackageRevisionWithRuntime, ManifestBuilder) error + Deactivate(ctx context.Context, pr v1.PackageRevisionWithRuntime, b ManifestBuilder) error } // RuntimeManifestBuilder builds the runtime manifests for a package revision. 
@@ -344,7 +345,7 @@ func (b *RuntimeManifestBuilder) packageName() string { } func (b *RuntimeManifestBuilder) packageType() string { - if _, ok := b.revision.(*v1beta1.FunctionRevision); ok { + if _, ok := b.revision.(*v1.FunctionRevision); ok { return "function" } return "provider" diff --git a/internal/controller/pkg/revision/runtime_function.go b/internal/controller/pkg/revision/runtime_function.go index b4b0359de..8eb1597c4 100644 --- a/internal/controller/pkg/revision/runtime_function.go +++ b/internal/controller/pkg/revision/runtime_function.go @@ -31,9 +31,8 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/meta" "github.com/crossplane/crossplane-runtime/pkg/resource" - pkgmetav1beta1 "github.com/crossplane/crossplane/apis/pkg/meta/v1beta1" + pkgmetav1 "github.com/crossplane/crossplane/apis/pkg/meta/v1" v1 "github.com/crossplane/crossplane/apis/pkg/v1" - "github.com/crossplane/crossplane/apis/pkg/v1beta1" "github.com/crossplane/crossplane/internal/initializer" "github.com/crossplane/crossplane/internal/xpkg" ) @@ -91,7 +90,10 @@ func (h *FunctionHooks) Pre(ctx context.Context, _ runtime.Object, pr v1.Package } // N.B.: We expect the revision to be applied by the caller - fRev := pr.(*v1beta1.FunctionRevision) + fRev, ok := pr.(*v1.FunctionRevision) + if !ok { + return errors.Errorf("cannot apply function package hooks to %T", pr) + } fRev.Status.Endpoint = fmt.Sprintf(serviceEndpointFmt, svc.Name, svc.Namespace, servicePort) secServer := build.TLSServerSecret() @@ -110,8 +112,8 @@ func (h *FunctionHooks) Pre(ctx context.Context, _ runtime.Object, pr v1.Package // Post performs operations meant to happen after establishing objects. 
func (h *FunctionHooks) Post(ctx context.Context, pkg runtime.Object, pr v1.PackageRevisionWithRuntime, build ManifestBuilder) error { - po, _ := xpkg.TryConvert(pkg, &pkgmetav1beta1.Function{}) - functionMeta, ok := po.(*pkgmetav1beta1.Function) + po, _ := xpkg.TryConvert(pkg, &pkgmetav1.Function{}) + functionMeta, ok := po.(*pkgmetav1.Function) if !ok { return errors.New(errNotFunction) } @@ -134,7 +136,7 @@ func (h *FunctionHooks) Post(ctx context.Context, pkg runtime.Object, pr v1.Pack // `deploymentTemplate.spec.template.spec.serviceAccountName` in the // DeploymentRuntimeConfig. if sa.Name == d.Spec.Template.Spec.ServiceAccountName { - if err := h.client.Apply(ctx, sa); err != nil { + if err := applySA(ctx, h.client, sa); err != nil { return errors.Wrap(err, errApplyFunctionSA) } } @@ -203,7 +205,7 @@ func functionServiceOverrides() []ServiceOverride { // getFunctionImage determines a complete function image, taking into account a // default registry. If the function meta specifies an image, we have a // preference for that image over what is specified in the package revision. 
-func getFunctionImage(fm *pkgmetav1beta1.Function, pr v1.PackageRevisionWithRuntime, defaultRegistry string) (string, error) { +func getFunctionImage(fm *pkgmetav1.Function, pr v1.PackageRevisionWithRuntime, defaultRegistry string) (string, error) { image := pr.GetSource() if fm.Spec.Image != nil { image = *fm.Spec.Image diff --git a/internal/controller/pkg/revision/runtime_function_test.go b/internal/controller/pkg/revision/runtime_function_test.go index c4ac2192f..178af2816 100644 --- a/internal/controller/pkg/revision/runtime_function_test.go +++ b/internal/controller/pkg/revision/runtime_function_test.go @@ -33,9 +33,8 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane-runtime/pkg/test" - pkgmetav1beta1 "github.com/crossplane/crossplane/apis/pkg/meta/v1beta1" + pkgmetav1 "github.com/crossplane/crossplane/apis/pkg/meta/v1" v1 "github.com/crossplane/crossplane/apis/pkg/v1" - "github.com/crossplane/crossplane/apis/pkg/v1beta1" "github.com/crossplane/crossplane/internal/xpkg" ) @@ -60,11 +59,11 @@ func TestFunctionPreHook(t *testing.T) { "Success": { reason: "Successful run of pre hook.", args: args{ - pkg: &pkgmetav1beta1.Function{ - Spec: pkgmetav1beta1.FunctionSpec{}, + pkg: &pkgmetav1.Function{ + Spec: pkgmetav1.FunctionSpec{}, }, - rev: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + rev: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ DesiredState: v1.PackageRevisionActive, }, @@ -74,7 +73,7 @@ func TestFunctionPreHook(t *testing.T) { }, }, manifests: &MockManifestBuilder{ - ServiceFn: func(overrides ...ServiceOverride) *corev1.Service { + ServiceFn: func(_ ...ServiceOverride) *corev1.Service { return &corev1.Service{} }, TLSServerSecretFn: func() *corev1.Secret { @@ -82,24 +81,24 @@ func TestFunctionPreHook(t *testing.T) { }, }, client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { 
+ MockGet: func(_ context.Context, _ client.ObjectKey, obj client.Object) error { if svc, ok := obj.(*corev1.Service); ok { svc.Name = "some-service" svc.Namespace = "some-namespace" } return nil }, - MockPatch: func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + MockPatch: func(_ context.Context, _ client.Object, _ client.Patch, _ ...client.PatchOption) error { return nil }, - MockUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + MockUpdate: func(_ context.Context, _ client.Object, _ ...client.UpdateOption) error { return nil }, }, }, want: want{ - rev: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + rev: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ DesiredState: v1.PackageRevisionActive, }, @@ -107,7 +106,7 @@ func TestFunctionPreHook(t *testing.T) { TLSServerSecretName: ptr.To("some-server-secret"), }, }, - Status: v1beta1.FunctionRevisionStatus{ + Status: v1.FunctionRevisionStatus{ Endpoint: fmt.Sprintf(serviceEndpointFmt, "some-service", "some-namespace", servicePort), }, }, @@ -157,9 +156,9 @@ func TestFunctionPostHook(t *testing.T) { "FunctionInactive": { reason: "Should do nothing if function revision is inactive.", args: args{ - pkg: &pkgmetav1beta1.Function{}, - rev: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + pkg: &pkgmetav1.Function{}, + rev: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ DesiredState: v1.PackageRevisionInactive, }, @@ -167,8 +166,8 @@ func TestFunctionPostHook(t *testing.T) { }, }, want: want{ - rev: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + rev: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ DesiredState: v1.PackageRevisionInactive, }, @@ -179,9 +178,9 @@ func TestFunctionPostHook(t *testing.T) { "ErrApplySA": { reason: 
"Should return error if we fail to apply service account for active function revision.", args: args{ - pkg: &pkgmetav1beta1.Function{}, - rev: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + pkg: &pkgmetav1.Function{}, + rev: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ Package: functionImage, DesiredState: v1.PackageRevisionActive, @@ -189,25 +188,25 @@ func TestFunctionPostHook(t *testing.T) { }, }, manifests: &MockManifestBuilder{ - ServiceAccountFn: func(overrides ...ServiceAccountOverride) *corev1.ServiceAccount { + ServiceAccountFn: func(_ ...ServiceAccountOverride) *corev1.ServiceAccount { return &corev1.ServiceAccount{} }, - DeploymentFn: func(serviceAccount string, overrides ...DeploymentOverride) *appsv1.Deployment { + DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { return &appsv1.Deployment{} }, }, client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { return nil }, - MockPatch: func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + MockPatch: func(_ context.Context, _ client.Object, _ client.Patch, _ ...client.PatchOption) error { return errBoom }, }, }, want: want{ - rev: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + rev: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ Package: functionImage, DesiredState: v1.PackageRevisionActive, @@ -220,9 +219,9 @@ func TestFunctionPostHook(t *testing.T) { "ErrApplyDeployment": { reason: "Should return error if we fail to apply deployment for active function revision.", args: args{ - pkg: &pkgmetav1beta1.Function{}, - rev: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + pkg: &pkgmetav1.Function{}, + rev: &v1.FunctionRevision{ + Spec: 
v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ Package: functionImage, DesiredState: v1.PackageRevisionActive, @@ -230,18 +229,18 @@ func TestFunctionPostHook(t *testing.T) { }, }, manifests: &MockManifestBuilder{ - ServiceAccountFn: func(overrides ...ServiceAccountOverride) *corev1.ServiceAccount { + ServiceAccountFn: func(_ ...ServiceAccountOverride) *corev1.ServiceAccount { return &corev1.ServiceAccount{} }, - DeploymentFn: func(serviceAccount string, overrides ...DeploymentOverride) *appsv1.Deployment { + DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { return &appsv1.Deployment{} }, }, client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { return nil }, - MockPatch: func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + MockPatch: func(_ context.Context, obj client.Object, _ client.Patch, _ ...client.PatchOption) error { if _, ok := obj.(*appsv1.Deployment); ok { return errBoom } @@ -250,8 +249,8 @@ func TestFunctionPostHook(t *testing.T) { }, }, want: want{ - rev: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + rev: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ Package: functionImage, DesiredState: v1.PackageRevisionActive, @@ -264,9 +263,9 @@ func TestFunctionPostHook(t *testing.T) { "ErrDeploymentNoAvailableConditionYet": { reason: "Should return error if deployment for active function revision has no available condition yet.", args: args{ - pkg: &pkgmetav1beta1.Function{}, - rev: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + pkg: &pkgmetav1.Function{}, + rev: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ Package: functionImage, DesiredState: v1.PackageRevisionActive, @@ -274,25 
+273,25 @@ func TestFunctionPostHook(t *testing.T) { }, }, manifests: &MockManifestBuilder{ - ServiceAccountFn: func(overrides ...ServiceAccountOverride) *corev1.ServiceAccount { + ServiceAccountFn: func(_ ...ServiceAccountOverride) *corev1.ServiceAccount { return &corev1.ServiceAccount{} }, - DeploymentFn: func(serviceAccount string, overrides ...DeploymentOverride) *appsv1.Deployment { + DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { return &appsv1.Deployment{} }, }, client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { return nil }, - MockPatch: func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + MockPatch: func(_ context.Context, _ client.Object, _ client.Patch, _ ...client.PatchOption) error { return nil }, }, }, want: want{ - rev: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + rev: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ Package: functionImage, DesiredState: v1.PackageRevisionActive, @@ -305,9 +304,9 @@ func TestFunctionPostHook(t *testing.T) { "ErrUnavailableDeployment": { reason: "Should return error if deployment is unavailable for function revision.", args: args{ - pkg: &pkgmetav1beta1.Function{}, - rev: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + pkg: &pkgmetav1.Function{}, + rev: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ Package: functionImage, DesiredState: v1.PackageRevisionActive, @@ -315,18 +314,18 @@ func TestFunctionPostHook(t *testing.T) { }, }, manifests: &MockManifestBuilder{ - ServiceAccountFn: func(overrides ...ServiceAccountOverride) *corev1.ServiceAccount { + ServiceAccountFn: func(_ ...ServiceAccountOverride) *corev1.ServiceAccount { return &corev1.ServiceAccount{} }, 
- DeploymentFn: func(serviceAccount string, overrides ...DeploymentOverride) *appsv1.Deployment { + DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { return &appsv1.Deployment{} }, }, client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { return nil }, - MockPatch: func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + MockPatch: func(_ context.Context, obj client.Object, _ client.Patch, _ ...client.PatchOption) error { if d, ok := obj.(*appsv1.Deployment); ok { d.Status.Conditions = []appsv1.DeploymentCondition{{ Type: appsv1.DeploymentAvailable, @@ -340,8 +339,8 @@ func TestFunctionPostHook(t *testing.T) { }, }, want: want{ - rev: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + rev: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ Package: functionImage, DesiredState: v1.PackageRevisionActive, @@ -354,9 +353,9 @@ func TestFunctionPostHook(t *testing.T) { "Successful": { reason: "Should not return error if successfully applied service account and deployment for active function revision and the deployment is ready.", args: args{ - pkg: &pkgmetav1beta1.Function{}, - rev: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + pkg: &pkgmetav1.Function{}, + rev: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ Package: functionImage, DesiredState: v1.PackageRevisionActive, @@ -364,18 +363,68 @@ func TestFunctionPostHook(t *testing.T) { }, }, manifests: &MockManifestBuilder{ - ServiceAccountFn: func(overrides ...ServiceAccountOverride) *corev1.ServiceAccount { + ServiceAccountFn: func(_ ...ServiceAccountOverride) *corev1.ServiceAccount { return &corev1.ServiceAccount{} }, - DeploymentFn: func(serviceAccount string, overrides 
...DeploymentOverride) *appsv1.Deployment { + DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { return &appsv1.Deployment{} }, }, client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { + return nil + }, + MockPatch: func(_ context.Context, obj client.Object, _ client.Patch, _ ...client.PatchOption) error { + if d, ok := obj.(*appsv1.Deployment); ok { + d.Status.Conditions = []appsv1.DeploymentCondition{{ + Type: appsv1.DeploymentAvailable, + Status: corev1.ConditionTrue, + }} + return nil + } + return nil + }, + }, + }, + want: want{ + rev: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ + PackageRevisionSpec: v1.PackageRevisionSpec{ + Package: functionImage, + DesiredState: v1.PackageRevisionActive, + }, + }, + }, + }, + }, + "SuccessWithExtraSecret": { + reason: "Should not return error if successfully applied service account with additional secret.", + args: args{ + pkg: &pkgmetav1.Function{}, + rev: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ + PackageRevisionSpec: v1.PackageRevisionSpec{ + Package: functionImage, + DesiredState: v1.PackageRevisionActive, + }, + }, + }, + manifests: &MockManifestBuilder{ + ServiceAccountFn: func(_ ...ServiceAccountOverride) *corev1.ServiceAccount { + return &corev1.ServiceAccount{} + }, + DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { + return &appsv1.Deployment{} + }, + }, + client: &test.MockClient{ + MockGet: func(_ context.Context, _ client.ObjectKey, obj client.Object) error { + if sa, ok := obj.(*corev1.ServiceAccount); ok { + sa.ImagePullSecrets = []corev1.LocalObjectReference{{Name: "test_secret"}} + } return nil }, - MockPatch: func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + MockPatch: func(_ context.Context, obj client.Object, _ client.Patch, _ 
...client.PatchOption) error { if d, ok := obj.(*appsv1.Deployment); ok { d.Status.Conditions = []appsv1.DeploymentCondition{{ Type: appsv1.DeploymentAvailable, @@ -388,8 +437,8 @@ func TestFunctionPostHook(t *testing.T) { }, }, want: want{ - rev: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + rev: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ Package: functionImage, DesiredState: v1.PackageRevisionActive, @@ -401,9 +450,9 @@ func TestFunctionPostHook(t *testing.T) { "SuccessfulWithExternallyManagedSA": { reason: "Should be successful without creating an SA, when the SA is managed externally", args: args{ - pkg: &pkgmetav1beta1.Function{}, - rev: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + pkg: &pkgmetav1.Function{}, + rev: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ Package: functionImage, DesiredState: v1.PackageRevisionActive, @@ -411,15 +460,15 @@ func TestFunctionPostHook(t *testing.T) { }, }, manifests: &MockManifestBuilder{ - ServiceAccountFn: func(overrides ...ServiceAccountOverride) *corev1.ServiceAccount { + ServiceAccountFn: func(_ ...ServiceAccountOverride) *corev1.ServiceAccount { return &corev1.ServiceAccount{} }, - DeploymentFn: func(serviceAccount string, overrides ...DeploymentOverride) *appsv1.Deployment { + DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { return &appsv1.Deployment{} }, }, client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, obj client.Object) error { if sa, ok := obj.(*corev1.ServiceAccount); ok { if sa.GetName() == xpManagedSA { return kerrors.NewNotFound(corev1.Resource("serviceaccount"), xpManagedSA) @@ -427,7 +476,7 @@ func TestFunctionPostHook(t *testing.T) { } return nil }, - MockCreate: func(ctx context.Context, obj client.Object, opts 
...client.CreateOption) error { + MockCreate: func(_ context.Context, obj client.Object, _ ...client.CreateOption) error { if sa, ok := obj.(*corev1.ServiceAccount); ok { if sa.GetName() == xpManagedSA { t.Error("unexpected call to create SA when SA is managed externally") @@ -435,7 +484,7 @@ func TestFunctionPostHook(t *testing.T) { } return nil }, - MockPatch: func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + MockPatch: func(_ context.Context, obj client.Object, _ client.Patch, _ ...client.PatchOption) error { if d, ok := obj.(*appsv1.Deployment); ok { d.Status.Conditions = []appsv1.DeploymentCondition{{ Type: appsv1.DeploymentAvailable, @@ -453,8 +502,8 @@ func TestFunctionPostHook(t *testing.T) { }, }, want: want{ - rev: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + rev: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ Package: functionImage, DesiredState: v1.PackageRevisionActive, @@ -501,15 +550,15 @@ func TestFunctionDeactivateHook(t *testing.T) { reason: "Should return error if we fail to delete deployment.", args: args{ manifests: &MockManifestBuilder{ - ServiceAccountFn: func(overrides ...ServiceAccountOverride) *corev1.ServiceAccount { + ServiceAccountFn: func(_ ...ServiceAccountOverride) *corev1.ServiceAccount { return &corev1.ServiceAccount{} }, - DeploymentFn: func(serviceAccount string, overrides ...DeploymentOverride) *appsv1.Deployment { + DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { return &appsv1.Deployment{} }, }, client: &test.MockClient{ - MockDelete: func(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error { + MockDelete: func(_ context.Context, obj client.Object, _ ...client.DeleteOption) error { if _, ok := obj.(*appsv1.Deployment); ok { return errBoom } @@ -525,14 +574,14 @@ func TestFunctionDeactivateHook(t *testing.T) { reason: "Should not return error if 
successfully deleted service account and deployment.", args: args{ manifests: &MockManifestBuilder{ - ServiceAccountFn: func(overrides ...ServiceAccountOverride) *corev1.ServiceAccount { + ServiceAccountFn: func(_ ...ServiceAccountOverride) *corev1.ServiceAccount { return &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: "some-sa", }, } }, - DeploymentFn: func(serviceAccount string, overrides ...DeploymentOverride) *appsv1.Deployment { + DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { return &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: "some-deployment", @@ -552,7 +601,7 @@ func TestFunctionDeactivateHook(t *testing.T) { }, }, client: &test.MockClient{ - MockDelete: func(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error { + MockDelete: func(_ context.Context, obj client.Object, _ ...client.DeleteOption) error { switch obj.(type) { case *corev1.ServiceAccount: return errors.New("service account should not be deleted during deactivation") @@ -586,8 +635,8 @@ func TestFunctionDeactivateHook(t *testing.T) { func TestGetFunctionImage(t *testing.T) { type args struct { - functionMeta *pkgmetav1beta1.Function - functionRevision *v1beta1.FunctionRevision + functionMeta *pkgmetav1.Function + functionRevision *v1.FunctionRevision defaultRegistry string } @@ -604,13 +653,13 @@ func TestGetFunctionImage(t *testing.T) { "NoOverrideFromMeta": { reason: "Should use the image from the package revision and add default registry when no override is present.", args: args{ - functionMeta: &pkgmetav1beta1.Function{ - Spec: pkgmetav1beta1.FunctionSpec{ + functionMeta: &pkgmetav1.Function{ + Spec: pkgmetav1.FunctionSpec{ Image: nil, }, }, - functionRevision: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + functionRevision: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ Package: "crossplane/func-bar:v1.2.3", }, @@ -626,13 +675,13 @@ func 
TestGetFunctionImage(t *testing.T) { "WithOverrideFromMeta": { reason: "Should use the override from the function meta when present and add default registry.", args: args{ - functionMeta: &pkgmetav1beta1.Function{ - Spec: pkgmetav1beta1.FunctionSpec{ + functionMeta: &pkgmetav1.Function{ + Spec: pkgmetav1.FunctionSpec{ Image: ptr.To("crossplane/func-bar-server:v1.2.3"), }, }, - functionRevision: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + functionRevision: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ Package: "crossplane/func-bar:v1.2.3", }, @@ -648,13 +697,13 @@ func TestGetFunctionImage(t *testing.T) { "RegistrySpecified": { reason: "Should honor the registry as specified on the package, even if its different than the default registry.", args: args{ - functionMeta: &pkgmetav1beta1.Function{ - Spec: pkgmetav1beta1.FunctionSpec{ + functionMeta: &pkgmetav1.Function{ + Spec: pkgmetav1.FunctionSpec{ Image: nil, }, }, - functionRevision: &v1beta1.FunctionRevision{ - Spec: v1beta1.FunctionRevisionSpec{ + functionRevision: &v1.FunctionRevision{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ Package: "registry.notdefault.io/crossplane/func-bar:v1.2.3", }, diff --git a/internal/controller/pkg/revision/runtime_override_options.go b/internal/controller/pkg/revision/runtime_override_options.go index 1ec9b6764..f40cfd7b2 100644 --- a/internal/controller/pkg/revision/runtime_override_options.go +++ b/internal/controller/pkg/revision/runtime_override_options.go @@ -92,6 +92,21 @@ func DeploymentWithNamespace(namespace string) DeploymentOverride { } } +// DeploymentWithOptionalPodScrapeAnnotations adds Prometheus scrape annotations +// to a Deployment pod template if they are not already set. 
+func DeploymentWithOptionalPodScrapeAnnotations() DeploymentOverride { + return func(d *appsv1.Deployment) { + if d.Spec.Template.Annotations == nil { + d.Spec.Template.Annotations = map[string]string{} + } + if _, ok := d.Spec.Template.Annotations["prometheus.io/scrape"]; !ok { + d.Spec.Template.Annotations["prometheus.io/scrape"] = "true" + d.Spec.Template.Annotations["prometheus.io/port"] = "8080" + d.Spec.Template.Annotations["prometheus.io/path"] = "/metrics" + } + } +} + // DeploymentWithOwnerReferences overrides the owner references of a Deployment. func DeploymentWithOwnerReferences(owners []metav1.OwnerReference) DeploymentOverride { return func(d *appsv1.Deployment) { @@ -255,7 +270,7 @@ func DeploymentWithRuntimeContainer() DeploymentOverride { // DeploymentForControllerConfig overrides the deployment with the values // defined in the ControllerConfig. -func DeploymentForControllerConfig(cc *v1alpha1.ControllerConfig) DeploymentOverride { //nolint:gocyclo // Simple if statements for setting values if they are not nil/empty. +func DeploymentForControllerConfig(cc *v1alpha1.ControllerConfig) DeploymentOverride { //nolint:gocognit // Simple if statements for setting values if they are not nil/empty. return func(d *appsv1.Deployment) { d.Labels = cc.Labels d.Annotations = cc.Annotations @@ -333,8 +348,7 @@ func DeploymentForControllerConfig(cc *v1alpha1.ControllerConfig) DeploymentOver d.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, cc.Spec.Volumes...) } if len(cc.Spec.VolumeMounts) > 0 { - d.Spec.Template.Spec.Containers[0].VolumeMounts = - append(d.Spec.Template.Spec.Containers[0].VolumeMounts, cc.Spec.VolumeMounts...) + d.Spec.Template.Spec.Containers[0].VolumeMounts = append(d.Spec.Template.Spec.Containers[0].VolumeMounts, cc.Spec.VolumeMounts...) 
} } } @@ -415,12 +429,10 @@ func mountTLSSecret(secret, volName, mountPath, envName string, d *appsv1.Deploy ReadOnly: true, MountPath: mountPath, } - d.Spec.Template.Spec.Containers[0].VolumeMounts = - append(d.Spec.Template.Spec.Containers[0].VolumeMounts, vm) + d.Spec.Template.Spec.Containers[0].VolumeMounts = append(d.Spec.Template.Spec.Containers[0].VolumeMounts, vm) envs := []corev1.EnvVar{ {Name: envName, Value: mountPath}, } - d.Spec.Template.Spec.Containers[0].Env = - append(d.Spec.Template.Spec.Containers[0].Env, envs...) + d.Spec.Template.Spec.Containers[0].Env = append(d.Spec.Template.Spec.Containers[0].Env, envs...) } diff --git a/internal/controller/pkg/revision/runtime_override_options_test.go b/internal/controller/pkg/revision/runtime_override_options_test.go index 5838bafee..bfe4300b4 100644 --- a/internal/controller/pkg/revision/runtime_override_options_test.go +++ b/internal/controller/pkg/revision/runtime_override_options_test.go @@ -169,7 +169,8 @@ func TestDeploymentWithRuntimeContainer(t *testing.T) { }, { Name: "another-one", - }, { + }, + { Name: runtimeContainerName, }, }, diff --git a/internal/controller/pkg/revision/runtime_provider.go b/internal/controller/pkg/revision/runtime_provider.go index 023d03819..151309a49 100644 --- a/internal/controller/pkg/revision/runtime_provider.go +++ b/internal/controller/pkg/revision/runtime_provider.go @@ -24,6 +24,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/crossplane/crossplane-runtime/pkg/errors" @@ -154,7 +155,7 @@ func (h *ProviderHooks) Post(ctx context.Context, pkg runtime.Object, pr v1.Pack // `deploymentTemplate.spec.template.spec.serviceAccountName` in the // DeploymentRuntimeConfig. 
if sa.Name == d.Spec.Template.Spec.ServiceAccountName { - if err := h.client.Apply(ctx, sa); err != nil { + if err := applySA(ctx, h.client, sa); err != nil { return errors.Wrap(err, errApplyProviderSA) } } @@ -229,6 +230,11 @@ func providerDeploymentOverrides(pm *pkgmetav1.Provider, pr v1.PackageRevisionWi // and plan to remove this after implementing a migration in a future // release. DeploymentWithSelectors(providerSelectors(pm, pr)), + + // Add optional scrape annotations to the deployment. It is possible to + // disable the scraping by setting the annotation "prometheus.io/scrape" + // as "false" in the DeploymentRuntimeConfig. + DeploymentWithOptionalPodScrapeAnnotations(), } do = append(do, DeploymentRuntimeWithOptionalImage(image)) @@ -287,3 +293,23 @@ func getProviderImage(pm *pkgmetav1.Provider, pr v1.PackageRevisionWithRuntime, return ref.Name(), nil } + +// applySA creates/updates a ServiceAccount and includes any image pull secrets +// that have been added by external controllers. 
+func applySA(ctx context.Context, cl resource.ClientApplicator, sa *corev1.ServiceAccount) error { + oldSa := &corev1.ServiceAccount{} + if err := cl.Get(ctx, types.NamespacedName{Name: sa.Name, Namespace: sa.Namespace}, oldSa); err == nil { + // Add pull secrets created by other controllers + existingSecrets := make(map[string]bool) + for _, secret := range sa.ImagePullSecrets { + existingSecrets[secret.Name] = true + } + + for _, secret := range oldSa.ImagePullSecrets { + if !existingSecrets[secret.Name] { + sa.ImagePullSecrets = append(sa.ImagePullSecrets, secret) + } + } + } + return cl.Apply(ctx, sa) +} diff --git a/internal/controller/pkg/revision/runtime_provider_test.go b/internal/controller/pkg/revision/runtime_provider_test.go index 58c768c68..5e2bb0253 100644 --- a/internal/controller/pkg/revision/runtime_provider_test.go +++ b/internal/controller/pkg/revision/runtime_provider_test.go @@ -46,9 +46,7 @@ const ( xpManagedSA = "xp-managed-sa" ) -var ( - errBoom = errors.New("boom") -) +var errBoom = errors.New("boom") func TestProviderPreHook(t *testing.T) { type args struct { @@ -149,7 +147,7 @@ func TestProviderPreHook(t *testing.T) { }, }, manifests: &MockManifestBuilder{ - ServiceFn: func(overrides ...ServiceOverride) *corev1.Service { + ServiceFn: func(_ ...ServiceOverride) *corev1.Service { return &corev1.Service{} }, TLSClientSecretFn: func() *corev1.Secret { @@ -160,13 +158,13 @@ func TestProviderPreHook(t *testing.T) { }, }, client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { return nil }, - MockPatch: func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + MockPatch: func(_ context.Context, _ client.Object, _ client.Patch, _ ...client.PatchOption) error { return nil }, - MockUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { 
+ MockUpdate: func(_ context.Context, _ client.Object, _ ...client.UpdateOption) error { return nil }, }, @@ -261,18 +259,18 @@ func TestProviderPostHook(t *testing.T) { }, }, manifests: &MockManifestBuilder{ - ServiceAccountFn: func(overrides ...ServiceAccountOverride) *corev1.ServiceAccount { + ServiceAccountFn: func(_ ...ServiceAccountOverride) *corev1.ServiceAccount { return &corev1.ServiceAccount{} }, - DeploymentFn: func(serviceAccount string, overrides ...DeploymentOverride) *appsv1.Deployment { + DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { return &appsv1.Deployment{} }, }, client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { return nil }, - MockPatch: func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + MockPatch: func(_ context.Context, _ client.Object, _ client.Patch, _ ...client.PatchOption) error { return errBoom }, }, @@ -302,18 +300,18 @@ func TestProviderPostHook(t *testing.T) { }, }, manifests: &MockManifestBuilder{ - ServiceAccountFn: func(overrides ...ServiceAccountOverride) *corev1.ServiceAccount { + ServiceAccountFn: func(_ ...ServiceAccountOverride) *corev1.ServiceAccount { return &corev1.ServiceAccount{} }, - DeploymentFn: func(serviceAccount string, overrides ...DeploymentOverride) *appsv1.Deployment { + DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { return &appsv1.Deployment{} }, }, client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { return nil }, - MockPatch: func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + MockPatch: func(_ context.Context, obj client.Object, _ client.Patch, _ ...client.PatchOption) error 
{ if _, ok := obj.(*appsv1.Deployment); ok { return errBoom } @@ -346,18 +344,18 @@ func TestProviderPostHook(t *testing.T) { }, }, manifests: &MockManifestBuilder{ - ServiceAccountFn: func(overrides ...ServiceAccountOverride) *corev1.ServiceAccount { + ServiceAccountFn: func(_ ...ServiceAccountOverride) *corev1.ServiceAccount { return &corev1.ServiceAccount{} }, - DeploymentFn: func(serviceAccount string, overrides ...DeploymentOverride) *appsv1.Deployment { + DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { return &appsv1.Deployment{} }, }, client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { return nil }, - MockPatch: func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + MockPatch: func(_ context.Context, _ client.Object, _ client.Patch, _ ...client.PatchOption) error { return nil }, }, @@ -387,18 +385,18 @@ func TestProviderPostHook(t *testing.T) { }, }, manifests: &MockManifestBuilder{ - ServiceAccountFn: func(overrides ...ServiceAccountOverride) *corev1.ServiceAccount { + ServiceAccountFn: func(_ ...ServiceAccountOverride) *corev1.ServiceAccount { return &corev1.ServiceAccount{} }, - DeploymentFn: func(serviceAccount string, overrides ...DeploymentOverride) *appsv1.Deployment { + DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { return &appsv1.Deployment{} }, }, client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { return nil }, - MockPatch: func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + MockPatch: func(_ context.Context, obj client.Object, _ client.Patch, _ ...client.PatchOption) error { if d, ok := obj.(*appsv1.Deployment); ok { 
d.Status.Conditions = []appsv1.DeploymentCondition{{ Type: appsv1.DeploymentAvailable, @@ -436,18 +434,68 @@ func TestProviderPostHook(t *testing.T) { }, }, manifests: &MockManifestBuilder{ - ServiceAccountFn: func(overrides ...ServiceAccountOverride) *corev1.ServiceAccount { + ServiceAccountFn: func(_ ...ServiceAccountOverride) *corev1.ServiceAccount { return &corev1.ServiceAccount{} }, - DeploymentFn: func(serviceAccount string, overrides ...DeploymentOverride) *appsv1.Deployment { + DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { return &appsv1.Deployment{} }, }, client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { + return nil + }, + MockPatch: func(_ context.Context, obj client.Object, _ client.Patch, _ ...client.PatchOption) error { + if d, ok := obj.(*appsv1.Deployment); ok { + d.Status.Conditions = []appsv1.DeploymentCondition{{ + Type: appsv1.DeploymentAvailable, + Status: corev1.ConditionTrue, + }} + return nil + } + return nil + }, + }, + }, + want: want{ + rev: &v1.ProviderRevision{ + Spec: v1.ProviderRevisionSpec{ + PackageRevisionSpec: v1.PackageRevisionSpec{ + Package: providerImage, + DesiredState: v1.PackageRevisionActive, + }, + }, + }, + }, + }, + "SuccessWithExtraSecret": { + reason: "Should not return error if successfully applied service account with additional secret.", + args: args{ + pkg: &pkgmetav1.Provider{}, + rev: &v1.ProviderRevision{ + Spec: v1.ProviderRevisionSpec{ + PackageRevisionSpec: v1.PackageRevisionSpec{ + Package: providerImage, + DesiredState: v1.PackageRevisionActive, + }, + }, + }, + manifests: &MockManifestBuilder{ + ServiceAccountFn: func(_ ...ServiceAccountOverride) *corev1.ServiceAccount { + return &corev1.ServiceAccount{} + }, + DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { + return &appsv1.Deployment{} + }, + }, + client: 
&test.MockClient{ + MockGet: func(_ context.Context, _ client.ObjectKey, obj client.Object) error { + if sa, ok := obj.(*corev1.ServiceAccount); ok { + sa.ImagePullSecrets = []corev1.LocalObjectReference{{Name: "test_secret"}} + } return nil }, - MockPatch: func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + MockPatch: func(_ context.Context, obj client.Object, _ client.Patch, _ ...client.PatchOption) error { if d, ok := obj.(*appsv1.Deployment); ok { d.Status.Conditions = []appsv1.DeploymentCondition{{ Type: appsv1.DeploymentAvailable, @@ -483,14 +531,14 @@ func TestProviderPostHook(t *testing.T) { }, }, manifests: &MockManifestBuilder{ - ServiceAccountFn: func(overrides ...ServiceAccountOverride) *corev1.ServiceAccount { + ServiceAccountFn: func(_ ...ServiceAccountOverride) *corev1.ServiceAccount { return &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: "xp-managed-sa", }, } }, - DeploymentFn: func(serviceAccount string, overrides ...DeploymentOverride) *appsv1.Deployment { + DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { return &appsv1.Deployment{ Spec: appsv1.DeploymentSpec{ Template: corev1.PodTemplateSpec{ @@ -503,7 +551,7 @@ func TestProviderPostHook(t *testing.T) { }, }, client: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, obj client.Object) error { if sa, ok := obj.(*corev1.ServiceAccount); ok { if sa.GetName() == "xp-managed-sa" { return kerrors.NewNotFound(corev1.Resource("serviceaccount"), "xp-managed-sa") @@ -511,7 +559,7 @@ func TestProviderPostHook(t *testing.T) { } return nil }, - MockCreate: func(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + MockCreate: func(_ context.Context, obj client.Object, _ ...client.CreateOption) error { if sa, ok := obj.(*corev1.ServiceAccount); ok { if sa.GetName() == "xp-managed-sa" { 
t.Error("unexpected call to create SA when SA is managed externally") @@ -519,7 +567,7 @@ func TestProviderPostHook(t *testing.T) { } return nil }, - MockPatch: func(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + MockPatch: func(_ context.Context, obj client.Object, _ client.Patch, _ ...client.PatchOption) error { if d, ok := obj.(*appsv1.Deployment); ok { d.Status.Conditions = []appsv1.DeploymentCondition{{ Type: appsv1.DeploymentAvailable, @@ -585,15 +633,15 @@ func TestProviderDeactivateHook(t *testing.T) { reason: "Should return error if we fail to delete deployment.", args: args{ manifests: &MockManifestBuilder{ - ServiceAccountFn: func(overrides ...ServiceAccountOverride) *corev1.ServiceAccount { + ServiceAccountFn: func(_ ...ServiceAccountOverride) *corev1.ServiceAccount { return &corev1.ServiceAccount{} }, - DeploymentFn: func(serviceAccount string, overrides ...DeploymentOverride) *appsv1.Deployment { + DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { return &appsv1.Deployment{} }, }, client: &test.MockClient{ - MockDelete: func(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error { + MockDelete: func(_ context.Context, obj client.Object, _ ...client.DeleteOption) error { if _, ok := obj.(*appsv1.Deployment); ok { return errBoom } @@ -614,14 +662,14 @@ func TestProviderDeactivateHook(t *testing.T) { }, }, manifests: &MockManifestBuilder{ - ServiceAccountFn: func(overrides ...ServiceAccountOverride) *corev1.ServiceAccount { + ServiceAccountFn: func(_ ...ServiceAccountOverride) *corev1.ServiceAccount { return &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: "some-sa", }, } }, - DeploymentFn: func(serviceAccount string, overrides ...DeploymentOverride) *appsv1.Deployment { + DeploymentFn: func(_ string, _ ...DeploymentOverride) *appsv1.Deployment { return &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: "some-deployment", @@ -641,7 +689,7 @@ 
func TestProviderDeactivateHook(t *testing.T) { }, }, client: &test.MockClient{ - MockDelete: func(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error { + MockDelete: func(_ context.Context, obj client.Object, _ ...client.DeleteOption) error { switch obj.(type) { case *corev1.ServiceAccount: return errors.New("service account should not be deleted during deactivation") diff --git a/internal/controller/pkg/revision/runtime_test.go b/internal/controller/pkg/revision/runtime_test.go index ed73fae81..731bf55bc 100644 --- a/internal/controller/pkg/revision/runtime_test.go +++ b/internal/controller/pkg/revision/runtime_test.go @@ -71,7 +71,7 @@ var ( }, } - functionRevision = &v1beta1.FunctionRevision{ + functionRevision = &v1.FunctionRevision{ TypeMeta: metav1.TypeMeta{ APIVersion: "pkg.crossplane.io/v1beta1", Kind: "FunctionRevision", @@ -82,7 +82,7 @@ var ( v1.LabelParentPackage: functionName, }, }, - Spec: v1beta1.FunctionRevisionSpec{ + Spec: v1.FunctionRevisionSpec{ PackageRevisionSpec: v1.PackageRevisionSpec{ Package: functionImage, }, @@ -159,6 +159,7 @@ func TestRuntimeManifestBuilderDeployment(t *testing.T) { "pkg.crossplane.io/revision": providerRevisionName, }), func(deployment *appsv1.Deployment) { deployment.Spec.Replicas = ptr.To[int32](3) + deployment.Spec.Template.Annotations = nil deployment.Spec.Template.Labels["k"] = "v" deployment.Spec.Template.Spec.Containers[0].Image = "crossplane/provider-foo:v1.2.4" deployment.Spec.Template.Spec.Volumes = append(deployment.Spec.Template.Spec.Volumes, corev1.Volume{Name: "vol-a"}, corev1.Volume{Name: "vol-b"}) @@ -221,6 +222,43 @@ func TestRuntimeManifestBuilderDeployment(t *testing.T) { }), }, }, + "ProviderDeploymentNoScrapeAnnotation": { + reason: "It should be possible to disable default scrape annotations", + args: args{ + builder: &RuntimeManifestBuilder{ + revision: providerRevision, + namespace: namespace, + runtimeConfig: &v1beta1.DeploymentRuntimeConfig{ + Spec: 
v1beta1.DeploymentRuntimeConfigSpec{ + DeploymentTemplate: &v1beta1.DeploymentTemplate{ + Spec: &appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "prometheus.io/scrape": "false", + }, + }, + Spec: corev1.PodSpec{}, + }, + }, + }, + }, + }, + }, + serviceAccountName: providerRevisionName, + overrides: providerDeploymentOverrides(&pkgmetav1.Provider{ObjectMeta: metav1.ObjectMeta{Name: providerMetaName}}, providerRevision, providerImage), + }, + want: want{ + want: deploymentProvider(providerName, providerRevisionName, providerImage, DeploymentWithSelectors(map[string]string{ + "pkg.crossplane.io/provider": providerMetaName, + "pkg.crossplane.io/revision": providerRevisionName, + }), func(deployment *appsv1.Deployment) { + deployment.Spec.Template.Annotations = map[string]string{ + "prometheus.io/scrape": "false", + } + }), + }, + }, "ProviderDeploymentWithAdvancedRuntimeConfig": { reason: "Baseline provided by the runtime config should be applied to the deployment for advanced use cases", args: args{ @@ -391,6 +429,11 @@ func deploymentProvider(provider string, revision string, image string, override }, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "prometheus.io/scrape": "true", + "prometheus.io/port": "8080", + "prometheus.io/path": "/metrics", + }, Labels: map[string]string{ "pkg.crossplane.io/revision": revision, "pkg.crossplane.io/provider": provider, diff --git a/internal/controller/pkg/revision/watch.go b/internal/controller/pkg/revision/watch.go index 936e87272..adfd80544 100644 --- a/internal/controller/pkg/revision/watch.go +++ b/internal/controller/pkg/revision/watch.go @@ -67,7 +67,7 @@ func (e *EnqueueRequestForReferencingProviderRevisions) Generic(ctx context.Cont e.add(ctx, evt.Object, q) } -func (e *EnqueueRequestForReferencingProviderRevisions) add(ctx context.Context, obj runtime.Object, queue adder) { 
//nolint:gocyclo // it will be simplified soon when we clean up the controller config +func (e *EnqueueRequestForReferencingProviderRevisions) add(ctx context.Context, obj runtime.Object, queue adder) { cc, isCC := obj.(*v1alpha1.ControllerConfig) rc, isRC := obj.(*v1beta1.DeploymentRuntimeConfig) @@ -129,7 +129,7 @@ func (e *EnqueueRequestForReferencingFunctionRevisions) Generic(ctx context.Cont e.add(ctx, evt.Object, q) } -func (e *EnqueueRequestForReferencingFunctionRevisions) add(ctx context.Context, obj runtime.Object, queue adder) { //nolint:gocyclo // it will be simplified soon when we clean up the controller config +func (e *EnqueueRequestForReferencingFunctionRevisions) add(ctx context.Context, obj runtime.Object, queue adder) { cc, isCC := obj.(*v1alpha1.ControllerConfig) rc, isRC := obj.(*v1beta1.DeploymentRuntimeConfig) @@ -137,7 +137,7 @@ func (e *EnqueueRequestForReferencingFunctionRevisions) add(ctx context.Context, return } - l := &v1beta1.FunctionRevisionList{} + l := &v1.FunctionRevisionList{} if err := e.client.List(ctx, l); err != nil { // TODO(hasheddan): Handle this error? 
return diff --git a/internal/controller/pkg/revision/watch_test.go b/internal/controller/pkg/revision/watch_test.go index 8eaece676..f64e2a582 100644 --- a/internal/controller/pkg/revision/watch_test.go +++ b/internal/controller/pkg/revision/watch_test.go @@ -35,9 +35,7 @@ import ( "github.com/crossplane/crossplane/apis/pkg/v1alpha1" ) -var ( - _ handler.EventHandler = &EnqueueRequestForReferencingProviderRevisions{} -) +var _ handler.EventHandler = &EnqueueRequestForReferencingProviderRevisions{} type addFn func(item any) diff --git a/internal/controller/rbac/controller/options.go b/internal/controller/rbac/controller/options.go index 549d27378..a9f821e26 100644 --- a/internal/controller/rbac/controller/options.go +++ b/internal/controller/rbac/controller/options.go @@ -40,10 +40,6 @@ const ( type Options struct { controller.Options - // ManagementPolicy specifies which roles the RBAC manager should - // manage. - ManagementPolicy ManagementPolicy - // AllowClusterRole is used to determine what additional RBAC // permissions may be granted to Providers that request them. The // provider may request any permission that appears in the named role. diff --git a/internal/controller/rbac/definition/reconciler.go b/internal/controller/rbac/definition/reconciler.go index 6bac28a68..bc4263aef 100644 --- a/internal/controller/rbac/definition/reconciler.go +++ b/internal/controller/rbac/definition/reconciler.go @@ -152,7 +152,6 @@ type Reconciler struct { // Reconcile a CompositeResourceDefinition by creating a series of opinionated // ClusterRoles that may be bound to allow access to the resources it defines. 
func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { - log := r.log.WithValues("request", req) log.Debug("Reconciling") @@ -182,7 +181,6 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco applied := make([]string, 0) for _, cr := range r.rbac.RenderClusterRoles(d) { - cr := cr // Pin range variable so we can take its address. log := log.WithValues("role-name", cr.GetName()) origRV := "" err := r.client.Apply(ctx, &cr, @@ -223,7 +221,9 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco // ClusterRoles. We consider ClusterRoles to be different if their labels and // rules do not match. func ClusterRolesDiffer(current, desired runtime.Object) bool { - c := current.(*rbacv1.ClusterRole) - d := desired.(*rbacv1.ClusterRole) + // Calling this with anything but ClusterRoles is a programming error. If it + // happens, we probably do want to panic. + c := current.(*rbacv1.ClusterRole) //nolint:forcetypeassert // See above. + d := desired.(*rbacv1.ClusterRole) //nolint:forcetypeassert // See above. return !cmp.Equal(c.GetLabels(), d.GetLabels()) || !cmp.Equal(c.Rules, d.Rules) } diff --git a/internal/controller/rbac/definition/reconciler_test.go b/internal/controller/rbac/definition/reconciler_test.go index fe07f4761..193621591 100644 --- a/internal/controller/rbac/definition/reconciler_test.go +++ b/internal/controller/rbac/definition/reconciler_test.go @@ -143,7 +143,7 @@ func TestReconcile(t *testing.T) { Client: &test.MockClient{ MockGet: test.NewMockGetFn(nil), }, - Applicator: resource.ApplyFn(func(ctx context.Context, o client.Object, ao ...resource.ApplyOption) error { + Applicator: resource.ApplyFn(func(ctx context.Context, o client.Object, _ ...resource.ApplyOption) error { // Simulate a no-op change by not allowing the update. 
return resource.AllowUpdateIf(func(_, _ runtime.Object) bool { return false })(ctx, o, o) }), diff --git a/internal/controller/rbac/definition/roles.go b/internal/controller/rbac/definition/roles.go index 9fc5ddf28..8e5ee29eb 100644 --- a/internal/controller/rbac/definition/roles.go +++ b/internal/controller/rbac/definition/roles.go @@ -53,6 +53,7 @@ const ( suffixFinalizers = "/finalizers" ) +//nolint:gochecknoglobals // We treat these as constants. var ( verbsEdit = []string{rbacv1.VerbAll} verbsView = []string{"get", "list", "watch"} @@ -110,8 +111,11 @@ func RenderClusterRoles(d *v1.CompositeResourceDefinition) []rbacv1.ClusterRole Rules: []rbacv1.PolicyRule{ { APIGroups: []string{d.Spec.Group}, - Resources: []string{d.Spec.Names.Plural}, - Verbs: verbsEdit, + Resources: []string{ + d.Spec.Names.Plural, + d.Spec.Names.Plural + suffixStatus, + }, + Verbs: verbsEdit, }, }, } @@ -129,8 +133,11 @@ func RenderClusterRoles(d *v1.CompositeResourceDefinition) []rbacv1.ClusterRole Rules: []rbacv1.PolicyRule{ { APIGroups: []string{d.Spec.Group}, - Resources: []string{d.Spec.Names.Plural}, - Verbs: verbsView, + Resources: []string{ + d.Spec.Names.Plural, + d.Spec.Names.Plural + suffixStatus, + }, + Verbs: verbsView, }, }, } @@ -147,8 +154,11 @@ func RenderClusterRoles(d *v1.CompositeResourceDefinition) []rbacv1.ClusterRole Rules: []rbacv1.PolicyRule{ { APIGroups: []string{d.Spec.Group}, - Resources: []string{d.Spec.Names.Plural}, - Verbs: verbsBrowse, + Resources: []string{ + d.Spec.Names.Plural, + d.Spec.Names.Plural + suffixStatus, + }, + Verbs: verbsBrowse, }, }, } @@ -175,14 +185,20 @@ func RenderClusterRoles(d *v1.CompositeResourceDefinition) []rbacv1.ClusterRole edit.Rules = append(edit.Rules, rbacv1.PolicyRule{ APIGroups: []string{d.Spec.Group}, - Resources: []string{d.Spec.ClaimNames.Plural}, - Verbs: verbsEdit, + Resources: []string{ + d.Spec.ClaimNames.Plural, + d.Spec.ClaimNames.Plural + suffixStatus, + }, + Verbs: verbsEdit, }) view.Rules = append(view.Rules, 
rbacv1.PolicyRule{ APIGroups: []string{d.Spec.Group}, - Resources: []string{d.Spec.ClaimNames.Plural}, - Verbs: verbsView, + Resources: []string{ + d.Spec.ClaimNames.Plural, + d.Spec.ClaimNames.Plural + suffixStatus, + }, + Verbs: verbsView, }) // The browse role only includes composite resources; not claims. diff --git a/internal/controller/rbac/definition/roles_test.go b/internal/controller/rbac/definition/roles_test.go index 645071e7f..bc80477e4 100644 --- a/internal/controller/rbac/definition/roles_test.go +++ b/internal/controller/rbac/definition/roles_test.go @@ -96,7 +96,7 @@ func TestRenderClusterRoles(t *testing.T) { Rules: []rbacv1.PolicyRule{ { APIGroups: []string{group}, - Resources: []string{pluralXR}, + Resources: []string{pluralXR, pluralXR + suffixStatus}, Verbs: verbsEdit, }, }, @@ -114,7 +114,7 @@ func TestRenderClusterRoles(t *testing.T) { Rules: []rbacv1.PolicyRule{ { APIGroups: []string{group}, - Resources: []string{pluralXR}, + Resources: []string{pluralXR, pluralXR + suffixStatus}, Verbs: verbsView, }, }, @@ -131,7 +131,7 @@ func TestRenderClusterRoles(t *testing.T) { Rules: []rbacv1.PolicyRule{ { APIGroups: []string{group}, - Resources: []string{pluralXR}, + Resources: []string{pluralXR, pluralXR + suffixStatus}, Verbs: verbsBrowse, }, }, @@ -195,12 +195,12 @@ func TestRenderClusterRoles(t *testing.T) { Rules: []rbacv1.PolicyRule{ { APIGroups: []string{group}, - Resources: []string{pluralXR}, + Resources: []string{pluralXR, pluralXR + suffixStatus}, Verbs: verbsEdit, }, { APIGroups: []string{group}, - Resources: []string{pluralXRC}, + Resources: []string{pluralXRC, pluralXRC + suffixStatus}, Verbs: verbsEdit, }, }, @@ -218,12 +218,12 @@ func TestRenderClusterRoles(t *testing.T) { Rules: []rbacv1.PolicyRule{ { APIGroups: []string{group}, - Resources: []string{pluralXR}, + Resources: []string{pluralXR, pluralXR + suffixStatus}, Verbs: verbsView, }, { APIGroups: []string{group}, - Resources: []string{pluralXRC}, + Resources: []string{pluralXRC, 
pluralXRC + suffixStatus}, Verbs: verbsView, }, }, @@ -241,7 +241,7 @@ func TestRenderClusterRoles(t *testing.T) { Rules: []rbacv1.PolicyRule{ { APIGroups: []string{group}, - Resources: []string{pluralXR}, + Resources: []string{pluralXR, pluralXR + suffixStatus}, Verbs: verbsBrowse, }, }, diff --git a/internal/controller/rbac/namespace/fuzz_test.go b/internal/controller/rbac/namespace/fuzz_test.go deleted file mode 100644 index db7f399cb..000000000 --- a/internal/controller/rbac/namespace/fuzz_test.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright 2023 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package namespace - -import ( - "testing" - - fuzz "github.com/AdaLogics/go-fuzz-headers" - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" -) - -func FuzzRenderRoles(f *testing.F) { - f.Fuzz(func(t *testing.T, data []byte) { - ff := fuzz.NewConsumer(data) - ns := &corev1.Namespace{} - ff.GenerateStruct(ns) - crs := make([]rbacv1.ClusterRole, 0) - ff.CreateSlice(&crs) - if len(crs) == 0 { - return - } - _ = RenderRoles(ns, crs) - }) -} diff --git a/internal/controller/rbac/namespace/reconciler.go b/internal/controller/rbac/namespace/reconciler.go deleted file mode 100644 index 9d2084eaf..000000000 --- a/internal/controller/rbac/namespace/reconciler.go +++ /dev/null @@ -1,255 +0,0 @@ -/* -Copyright 2020 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package namespace implements the RBAC manager's support for namespaces. -package namespace - -import ( - "context" - "fmt" - "strings" - "time" - - "github.com/google/go-cmp/cmp" - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "github.com/crossplane/crossplane-runtime/pkg/errors" - "github.com/crossplane/crossplane-runtime/pkg/event" - "github.com/crossplane/crossplane-runtime/pkg/logging" - "github.com/crossplane/crossplane-runtime/pkg/meta" - "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" - "github.com/crossplane/crossplane-runtime/pkg/resource" - - "github.com/crossplane/crossplane/internal/controller/rbac/controller" -) - -const ( - timeout = 2 * time.Minute - - errGetNamespace = "cannot get Namespace" - errApplyRole = "cannot apply Roles" - errListRoles = "cannot list ClusterRoles" -) - -// Event reasons. -const ( - reasonApplyRoles event.Reason = "ApplyRoles" -) - -// A RoleRenderer renders Roles for a given Namespace. -type RoleRenderer interface { - // RenderRoles for the supplied Namespace. - RenderRoles(d *corev1.Namespace, crs []rbacv1.ClusterRole) []rbacv1.Role -} - -// A RoleRenderFn renders Roles for the supplied Namespace. -type RoleRenderFn func(d *corev1.Namespace, crs []rbacv1.ClusterRole) []rbacv1.Role - -// RenderRoles renders Roles for the supplied Namespace. 
-func (fn RoleRenderFn) RenderRoles(d *corev1.Namespace, crs []rbacv1.ClusterRole) []rbacv1.Role { - return fn(d, crs) -} - -// Setup adds a controller that reconciles a Namespace by creating a series of -// opinionated Roles that may be bound to allow access to resources within that -// namespace. -func Setup(mgr ctrl.Manager, o controller.Options) error { - name := "rbac/namespace" - - r := NewReconciler(mgr, - WithLogger(o.Logger.WithValues("controller", name)), - WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name)))) - - return ctrl.NewControllerManagedBy(mgr). - Named(name). - For(&corev1.Namespace{}). - Owns(&rbacv1.Role{}). - Watches(&rbacv1.ClusterRole{}, &EnqueueRequestForNamespaces{client: mgr.GetClient()}). - WithOptions(o.ForControllerRuntime()). - Complete(ratelimiter.NewReconciler(name, errors.WithSilentRequeueOnConflict(r), o.GlobalRateLimiter)) -} - -// ReconcilerOption is used to configure the Reconciler. -type ReconcilerOption func(*Reconciler) - -// WithLogger specifies how the Reconciler should log messages. -func WithLogger(log logging.Logger) ReconcilerOption { - return func(r *Reconciler) { - r.log = log - } -} - -// WithRecorder specifies how the Reconciler should record Kubernetes events. -func WithRecorder(er event.Recorder) ReconcilerOption { - return func(r *Reconciler) { - r.record = er - } -} - -// WithClientApplicator specifies how the Reconciler should interact with the -// Kubernetes API. -func WithClientApplicator(ca resource.ClientApplicator) ReconcilerOption { - return func(r *Reconciler) { - r.client = ca - } -} - -// WithRoleRenderer specifies how the Reconciler should render RBAC -// Roles. -func WithRoleRenderer(rr RoleRenderer) ReconcilerOption { - return func(r *Reconciler) { - r.rbac = rr - } -} - -// NewReconciler returns a Reconciler of Namespaces. -func NewReconciler(mgr manager.Manager, opts ...ReconcilerOption) *Reconciler { - r := &Reconciler{ - // TODO(negz): Is Updating appropriate here? Probably. 
- client: resource.ClientApplicator{ - Client: mgr.GetClient(), - Applicator: resource.NewAPIUpdatingApplicator(mgr.GetClient()), - }, - - rbac: RoleRenderFn(RenderRoles), - - log: logging.NewNopLogger(), - record: event.NewNopRecorder(), - } - - for _, f := range opts { - f(r) - } - return r -} - -// A Reconciler reconciles Namespaces. -type Reconciler struct { - client resource.ClientApplicator - rbac RoleRenderer - - log logging.Logger - record event.Recorder -} - -// Reconcile a Namespace by creating a series of opinionated Roles that may be -// bound to allow access to resources within that namespace. -func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { - - log := r.log.WithValues("request", req) - log.Debug("Reconciling") - - ctx, cancel := context.WithTimeout(ctx, timeout) - defer cancel() - - ns := &corev1.Namespace{} - if err := r.client.Get(ctx, req.NamespacedName, ns); err != nil { - // In case object is not found, most likely the object was deleted and - // then disappeared while the event was in the processing queue. We - // don't need to take any action in that case. - log.Debug(errGetNamespace, "error", err) - return reconcile.Result{}, errors.Wrap(resource.IgnoreNotFound(err), errGetNamespace) - } - - log = log.WithValues( - "uid", ns.GetUID(), - "version", ns.GetResourceVersion(), - "name", ns.GetName(), - ) - - if meta.WasDeleted(ns) { - // There's nothing to do if our namespace is being deleted. Any Roles we - // created will be deleted along with the namespace. - return reconcile.Result{Requeue: false}, nil - } - - // NOTE(negz): We don't expect there to be an unwieldy amount of roles, so - // we just list and pass them all. We're listing from a cache that handles - // label selectors locally, so filtering with a label selector here won't - // meaningfully improve performance relative to filtering in RenderRoles. 
- // https://github.com/kubernetes-sigs/controller-runtime/blob/d6829e9/pkg/cache/internal/cache_reader.go#L131 - l := &rbacv1.ClusterRoleList{} - if err := r.client.List(ctx, l); err != nil { - if kerrors.IsConflict(err) { - return reconcile.Result{Requeue: true}, nil - } - err = errors.Wrap(err, errListRoles) - r.record.Event(ns, event.Warning(reasonApplyRoles, err)) - return reconcile.Result{}, err - } - - var applied []string //nolint:prealloc // We don't know how many roles we'll apply. - for _, rl := range r.rbac.RenderRoles(ns, l.Items) { - log := log.WithValues("role-name", rl.GetName()) - rl := rl // Pin range variable so we can take its address. - - err := r.client.Apply(ctx, &rl, resource.MustBeControllableBy(ns.GetUID()), resource.AllowUpdateIf(RolesDiffer)) - if resource.IsNotAllowed(err) { - log.Debug("Skipped no-op RBAC Role apply") - continue - } - if err != nil { - if kerrors.IsConflict(err) { - return reconcile.Result{Requeue: true}, nil - } - err = errors.Wrap(err, errApplyRole) - r.record.Event(ns, event.Warning(reasonApplyRoles, err)) - return reconcile.Result{}, err - } - - log.Debug("Applied RBAC Role") - applied = append(applied, rl.GetName()) - } - - if len(applied) > 0 { - r.record.Event(ns, event.Normal(reasonApplyRoles, fmt.Sprintf("Applied RBAC Roles: %s", resource.StableNAndSomeMore(resource.DefaultFirstN, applied)))) - } - - return reconcile.Result{Requeue: false}, nil -} - -// RolesDiffer returns true if the supplied objects are different Roles. We -// consider Roles to be different if their crossplane annotations or rules do not match. -func RolesDiffer(current, desired runtime.Object) bool { - c := current.(*rbacv1.Role) - d := desired.(*rbacv1.Role) - return !equalRolesAnnotations(c, d) || !cmp.Equal(c.Rules, d.Rules) -} - -// equalRolesAnnotations compares the crossplane rbac annotations (prefixed by "rbac.crossplane.io/") -// of two Roles and returns true if they are equal. 
-func equalRolesAnnotations(current, desired *rbacv1.Role) bool { - currentFiltered := make(map[string]string) - for k, v := range current.GetAnnotations() { - if strings.HasPrefix(k, keyPrefix) { - currentFiltered[k] = v - } - } - - desiredFiltered := make(map[string]string) - for k, v := range desired.GetAnnotations() { - if strings.HasPrefix(k, keyPrefix) { - desiredFiltered[k] = v - } - } - return cmp.Equal(currentFiltered, desiredFiltered) -} diff --git a/internal/controller/rbac/namespace/reconciler_test.go b/internal/controller/rbac/namespace/reconciler_test.go deleted file mode 100644 index fd492dd75..000000000 --- a/internal/controller/rbac/namespace/reconciler_test.go +++ /dev/null @@ -1,296 +0,0 @@ -/* -Copyright 2020 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package namespace - -import ( - "context" - "io" - "testing" - - "github.com/google/go-cmp/cmp" - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log/zap" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "github.com/crossplane/crossplane-runtime/pkg/errors" - "github.com/crossplane/crossplane-runtime/pkg/logging" - "github.com/crossplane/crossplane-runtime/pkg/resource" - "github.com/crossplane/crossplane-runtime/pkg/resource/fake" - "github.com/crossplane/crossplane-runtime/pkg/test" -) - -func TestReconcile(t *testing.T) { - errBoom := errors.New("boom") - testLog := logging.NewLogrLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(io.Discard)).WithName("testlog")) - now := metav1.Now() - - type args struct { - mgr manager.Manager - opts []ReconcilerOption - } - type want struct { - r reconcile.Result - err error - } - - cases := map[string]struct { - reason string - args args - want want - }{ - "NamespaceNotFound": { - reason: "We should not return an error if the Namespace was not found.", - args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(kerrors.NewNotFound(schema.GroupResource{}, "")), - }, - }), - }, - }, - want: want{ - r: reconcile.Result{}, - }, - }, - "GetNamespaceError": { - reason: "We should return any other error encountered while getting a Namespace.", - args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(errBoom), - }, - }), - }, - }, - want: want{ - err: errors.Wrap(errBoom, errGetNamespace), 
- }, - }, - "NamespaceDeleted": { - reason: "We should return early if the namespace was deleted.", - args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil, func(o client.Object) error { - d := o.(*corev1.Namespace) - d.SetDeletionTimestamp(&now) - return nil - }), - }, - }), - }, - }, - want: want{ - r: reconcile.Result{Requeue: false}, - }, - }, - "ListClusterRolesError": { - reason: "We should return an error encountered listing ClusterRoles.", - args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockList: test.NewMockListFn(errBoom), - }, - }), - }, - }, - want: want{ - err: errors.Wrap(errBoom, errListRoles), - }, - }, - "ApplyRoleError": { - reason: "We should return an error encountered applying a Role.", - args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockList: test.NewMockListFn(nil), - }, - Applicator: resource.ApplyFn(func(context.Context, client.Object, ...resource.ApplyOption) error { - return errBoom - }), - }), - WithRoleRenderer(RoleRenderFn(func(*corev1.Namespace, []rbacv1.ClusterRole) []rbacv1.Role { - return []rbacv1.Role{{}} - })), - }, - }, - want: want{ - err: errors.Wrap(errBoom, errApplyRole), - }, - }, - "SuccessfulNoOp": { - reason: "We should not requeue when no Roles need applying.", - args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockList: test.NewMockListFn(nil), - }, - Applicator: resource.ApplyFn(func(ctx context.Context, o client.Object, ao ...resource.ApplyOption) error { - // Simulate a no-op change by not allowing the 
update. - return resource.AllowUpdateIf(func(_, _ runtime.Object) bool { return false })(ctx, o, o) - }), - }), - WithRoleRenderer(RoleRenderFn(func(*corev1.Namespace, []rbacv1.ClusterRole) []rbacv1.Role { - return []rbacv1.Role{{}} - })), - }, - }, - want: want{ - r: reconcile.Result{Requeue: false}, - }, - }, - "SuccessfulApply": { - reason: "We should not requeue when we successfully apply our Roles.", - args: args{ - mgr: &fake.Manager{}, - opts: []ReconcilerOption{ - WithClientApplicator(resource.ClientApplicator{ - Client: &test.MockClient{ - MockGet: test.NewMockGetFn(nil), - MockList: test.NewMockListFn(nil), - }, - Applicator: resource.ApplyFn(func(context.Context, client.Object, ...resource.ApplyOption) error { - return nil - }), - }), - WithRoleRenderer(RoleRenderFn(func(*corev1.Namespace, []rbacv1.ClusterRole) []rbacv1.Role { - return []rbacv1.Role{{}} - })), - }, - }, - want: want{ - r: reconcile.Result{Requeue: false}, - }, - }, - } - - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - r := NewReconciler(tc.args.mgr, append(tc.args.opts, WithLogger(testLog))...) 
- got, err := r.Reconcile(context.Background(), reconcile.Request{}) - - if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { - t.Errorf("\n%s\nr.Reconcile(...): -want error, +got error:\n%s", tc.reason, diff) - } - if diff := cmp.Diff(tc.want.r, got, test.EquateErrors()); diff != "" { - t.Errorf("\n%s\nr.Reconcile(...): -want, +got:\n%s", tc.reason, diff) - } - }) - } -} - -func TestRolesDiffer(t *testing.T) { - cases := map[string]struct { - current runtime.Object - desired runtime.Object - want bool - }{ - "Equal": { - current: &rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{"rbac.crossplane.io/a": "a"}, - }, - Rules: []rbacv1.PolicyRule{{}}, - }, - desired: &rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{"rbac.crossplane.io/a": "a"}, - }, - Rules: []rbacv1.PolicyRule{{}}, - }, - want: false, - }, - "EqualMixedNonCrossplane": { - current: &rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{"rbac.crossplane.io/a": "a"}, - }, - Rules: []rbacv1.PolicyRule{{}}, - }, - desired: &rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - "rbac.crossplane.io/a": "a", - "not-managed-by-crossplane/b": "b", - }, - }, - Rules: []rbacv1.PolicyRule{{}}, - }, - want: false, - }, - "AnnotationsDiffer": { - current: &rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{"rbac.crossplane.io/a": "a"}, - }, - Rules: []rbacv1.PolicyRule{{}}, - }, - desired: &rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{"rbac.crossplane.io/b": "b"}, - }, - Rules: []rbacv1.PolicyRule{{}}, - }, - want: true, - }, - "RulesDiffer": { - current: &rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{"rbac.crossplane.io/a": "a"}, - }, - Rules: []rbacv1.PolicyRule{{}}, - }, - desired: &rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{"rbac.crossplane.io/a": 
"a"}, - }, - }, - want: true, - }, - } - - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - got := RolesDiffer(tc.current, tc.desired) - if diff := cmp.Diff(tc.want, got); diff != "" { - t.Errorf("RolesDiffer(...): -want, +got\n:%s", diff) - } - }) - } -} diff --git a/internal/controller/rbac/namespace/roles.go b/internal/controller/rbac/namespace/roles.go deleted file mode 100644 index dfb1133c0..000000000 --- a/internal/controller/rbac/namespace/roles.go +++ /dev/null @@ -1,136 +0,0 @@ -/* -Copyright 2020 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICEE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIO OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package namespace - -import ( - "sort" - "strings" - - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - - "github.com/crossplane/crossplane-runtime/pkg/meta" -) - -const ( - nameAdmin = "crossplane-admin" - nameEdit = "crossplane-edit" - nameView = "crossplane-view" - - keyPrefix = "rbac.crossplane.io/" - - keyAggToAdmin = keyPrefix + "aggregate-to-ns-admin" - keyAggToEdit = keyPrefix + "aggregate-to-ns-edit" - keyAggToView = keyPrefix + "aggregate-to-ns-view" - - keyBaseOfAdmin = keyPrefix + "base-of-ns-admin" - keyBaseOfEdit = keyPrefix + "base-of-ns-edit" - keyBaseOfView = keyPrefix + "base-of-ns-view" - - keyXRD = keyPrefix + "xrd" - - keyAggregated = "aggregated-by-crossplane" - - valTrue = "true" - valAccept = "xrd-claim-accepted" -) - -// RenderRoles for the supplied namespace by aggregating rules from the supplied -// cluster roles. -func RenderRoles(ns *corev1.Namespace, crs []rbacv1.ClusterRole) []rbacv1.Role { - // Our list of CRs has no guaranteed order, so we sort them in order to - // ensure we don't reorder our RBAC rules on each update. 
- sort.Slice(crs, func(i, j int) bool { return crs[i].GetName() < crs[j].GetName() }) - - admin := &rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: ns.GetName(), - Name: nameAdmin, - Annotations: map[string]string{keyPrefix + keyAggregated: valTrue}, - }, - } - edit := &rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: ns.GetName(), - Name: nameEdit, - Annotations: map[string]string{keyPrefix + keyAggregated: valTrue}, - }, - } - view := &rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: ns.GetName(), - Name: nameView, - Annotations: map[string]string{keyPrefix + keyAggregated: valTrue}, - }, - } - - gvk := schema.GroupVersionKind{Version: "v1", Kind: "Namespace"} - meta.AddOwnerReference(admin, meta.AsController(meta.TypedReferenceTo(ns, gvk))) - meta.AddOwnerReference(edit, meta.AsController(meta.TypedReferenceTo(ns, gvk))) - meta.AddOwnerReference(view, meta.AsController(meta.TypedReferenceTo(ns, gvk))) - - accepts := map[string]bool{} - for k, v := range ns.GetAnnotations() { - if strings.HasPrefix(k, keyPrefix) && v == valAccept { - accepts[strings.TrimPrefix(k, keyPrefix)] = true - } - } - - acrs := crSelector{keyAggToAdmin, keyBaseOfAdmin, accepts} - ecrs := crSelector{keyAggToEdit, keyBaseOfEdit, accepts} - vcrs := crSelector{keyAggToView, keyBaseOfView, accepts} - - // TODO(negz): Annotate rendered Roles to indicate which ClusterRoles they - // are aggregating rules from? This aggregation is likely to be surprising - // to the uninitiated. - for _, cr := range crs { - if acrs.Select(cr) { - admin.Rules = append(admin.Rules, cr.Rules...) - } - - if ecrs.Select(cr) { - edit.Rules = append(edit.Rules, cr.Rules...) - } - - if vcrs.Select(cr) { - view.Rules = append(view.Rules, cr.Rules...) 
- } - } - - return []rbacv1.Role{*admin, *edit, *view} -} - -type crSelector struct { - keyAgg string - keyBase string - accepts map[string]bool -} - -func (s crSelector) Select(cr rbacv1.ClusterRole) bool { - l := cr.GetLabels() - - // All cluster roles must have an aggregation key to be selected. - if l[s.keyAgg] != valTrue { - return false - } - - // Cluster roles must either be the base of this role, or pertain to an XRD - // that this namespace accepts a claim from. - return l[s.keyBase] == valTrue || s.accepts[l[keyXRD]] -} diff --git a/internal/controller/rbac/namespace/roles_test.go b/internal/controller/rbac/namespace/roles_test.go deleted file mode 100644 index 9e2b484b8..000000000 --- a/internal/controller/rbac/namespace/roles_test.go +++ /dev/null @@ -1,309 +0,0 @@ -/* -Copyright 2020 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package namespace - -import ( - "testing" - - "github.com/google/go-cmp/cmp" - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" -) - -func TestCRSelector(t *testing.T) { - xrdName := "composites.example.org" - - type fields struct { - keyAgg string - keyBase string - accepts map[string]bool - } - - cases := map[string]struct { - reason string - fields fields - cr rbacv1.ClusterRole - want bool - }{ - "MissingAggregationLabel": { - reason: "Only ClusterRoles with the aggregation label should be selected", - fields: fields{ - keyAgg: keyAggToAdmin, - keyBase: keyBaseOfAdmin, - accepts: map[string]bool{xrdName: true}, - }, - cr: rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{ - keyBaseOfAdmin: valTrue, - }}}, - want: false, - }, - "OnlyAggregationLabel": { - reason: "ClusterRoles must have either the base label or the label of an accepted XRD to be selected", - fields: fields{ - keyAgg: keyAggToAdmin, - keyBase: keyBaseOfAdmin, - accepts: map[string]bool{xrdName: true}, - }, - cr: rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{ - keyAggToAdmin: valTrue, - }}}, - want: false, - }, - "IsBaseRole": { - reason: "ClusterRoles with the aggregation and base labels should be selected", - fields: fields{ - keyAgg: keyAggToAdmin, - keyBase: keyBaseOfAdmin, - accepts: map[string]bool{xrdName: true}, - }, - cr: rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{ - keyAggToAdmin: valTrue, - keyBaseOfAdmin: valTrue, - }}}, - want: true, - }, - "IsAcceptedXRDRole": { - reason: "ClusterRoles with the aggregation and an accepted XRD label should be selected", - fields: fields{ - keyAgg: keyAggToAdmin, - keyBase: keyBaseOfAdmin, - accepts: map[string]bool{xrdName: true}, - }, - cr: rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{ - keyAggToAdmin: valTrue, - keyXRD: xrdName, - }}}, - 
want: true, - }, - "IsUnknownXRDRole": { - reason: "ClusterRoles with the aggregation label but an unknown XRD label should be ignored", - fields: fields{ - keyAgg: keyAggToAdmin, - keyBase: keyBaseOfAdmin, - accepts: map[string]bool{xrdName: true}, - }, - cr: rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{ - keyAggToAdmin: valTrue, - keyXRD: "unknown.example.org", // An XRD we don't accept. - }}}, - want: false, - }, - } - - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - crs := crSelector{tc.fields.keyAgg, tc.fields.keyBase, tc.fields.accepts} - got := crs.Select(tc.cr) - if diff := cmp.Diff(tc.want, got); diff != "" { - t.Errorf("crs.Select(...): -want, +got:\n%s\n", diff) - } - }) - } - -} - -func TestRenderClusterRoles(t *testing.T) { - name := "spacename" - uid := types.UID("no-you-id") - - ctrl := true - owner := metav1.OwnerReference{ - APIVersion: "v1", - Kind: "Namespace", - Name: name, - UID: uid, - Controller: &ctrl, - BlockOwnerDeletion: &ctrl, - } - - crNameA := "A" - crNameB := "B" - crNameC := "C" - - ruleA := rbacv1.PolicyRule{APIGroups: []string{"A"}} - ruleB := rbacv1.PolicyRule{APIGroups: []string{"B"}} - ruleC := rbacv1.PolicyRule{APIGroups: []string{"C"}} - - xrdName := "guilty-gear-xrd" - - type args struct { - ns *corev1.Namespace - crs []rbacv1.ClusterRole - } - - cases := map[string]struct { - reason string - args args - want []rbacv1.Role - }{ - "APlainOldNamespace": { - reason: "A namespace with no annotations should get admin, edit, and view roles with only base rules, if any exist.", - args: args{ - ns: &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: name, UID: uid}}, - crs: []rbacv1.ClusterRole{ - { - // This role's rules should be aggregated to the admin role. 
- ObjectMeta: metav1.ObjectMeta{ - Name: crNameA, - Labels: map[string]string{ - keyAggToAdmin: valTrue, - keyBaseOfAdmin: valTrue, - }, - }, - Rules: []rbacv1.PolicyRule{ruleA}, - }, - { - // This role's rules should also be aggregated to the admin role. - ObjectMeta: metav1.ObjectMeta{ - Name: crNameB, - Labels: map[string]string{ - keyAggToAdmin: valTrue, - keyBaseOfAdmin: valTrue, - }, - }, - Rules: []rbacv1.PolicyRule{ruleB}, - }, - { - // This role doesn't have any interesting labels. It should not be aggregated. - ObjectMeta: metav1.ObjectMeta{ - Name: crNameC, - Labels: map[string]string{}, - }, - Rules: []rbacv1.PolicyRule{ruleC}, - }, - }, - }, - want: []rbacv1.Role{ - { - ObjectMeta: metav1.ObjectMeta{ - Namespace: name, - Name: nameAdmin, - OwnerReferences: []metav1.OwnerReference{owner}, - Annotations: map[string]string{keyPrefix + keyAggregated: valTrue}, - }, - Rules: []rbacv1.PolicyRule{ruleA, ruleB}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Namespace: name, - Name: nameEdit, - OwnerReferences: []metav1.OwnerReference{owner}, - Annotations: map[string]string{keyPrefix + keyAggregated: valTrue}, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Namespace: name, - Name: nameView, - OwnerReferences: []metav1.OwnerReference{owner}, - Annotations: map[string]string{keyPrefix + keyAggregated: valTrue}, - }, - }, - }, - }, - "ANamespaceThatAcceptsClaims": { - reason: "A namespace that is annotated to accept claims should get admin, edit, and view roles with base and XRD rules, if they exist.", - args: args{ - ns: &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - UID: uid, - Annotations: map[string]string{keyPrefix + xrdName: valAccept}, - }, - }, - crs: []rbacv1.ClusterRole{ - { - // This role's rules should be aggregated to the admin and edit roles. 
- ObjectMeta: metav1.ObjectMeta{ - Name: crNameA, - Labels: map[string]string{ - keyAggToAdmin: valTrue, - keyBaseOfAdmin: valTrue, - keyAggToEdit: valTrue, - keyBaseOfEdit: valTrue, - }, - }, - Rules: []rbacv1.PolicyRule{ruleA}, - }, - { - // This role's rules should also be aggregated to the admin and edit roles. - ObjectMeta: metav1.ObjectMeta{ - Name: crNameB, - Labels: map[string]string{ - keyAggToAdmin: valTrue, - keyAggToEdit: valTrue, - keyXRD: xrdName, // The namespace accepts the claim this XRD offers. - }, - }, - Rules: []rbacv1.PolicyRule{ruleB}, - }, - { - // This role's rules should be aggregated to the view role. - ObjectMeta: metav1.ObjectMeta{ - Name: crNameC, - Labels: map[string]string{ - keyAggToView: valTrue, - keyXRD: xrdName, // The namespace accepts the claim this XRD offers. - }, - }, - Rules: []rbacv1.PolicyRule{ruleC}, - }, - }, - }, - want: []rbacv1.Role{ - { - ObjectMeta: metav1.ObjectMeta{ - Namespace: name, - Name: nameAdmin, - OwnerReferences: []metav1.OwnerReference{owner}, - Annotations: map[string]string{keyPrefix + keyAggregated: valTrue}, - }, - Rules: []rbacv1.PolicyRule{ruleA, ruleB}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Namespace: name, - Name: nameEdit, - OwnerReferences: []metav1.OwnerReference{owner}, - Annotations: map[string]string{keyPrefix + keyAggregated: valTrue}, - }, - Rules: []rbacv1.PolicyRule{ruleA, ruleB}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Namespace: name, - Name: nameView, - OwnerReferences: []metav1.OwnerReference{owner}, - Annotations: map[string]string{keyPrefix + keyAggregated: valTrue}, - }, - Rules: []rbacv1.PolicyRule{ruleC}, - }, - }, - }, - } - - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - got := RenderRoles(tc.args.ns, tc.args.crs) - if diff := cmp.Diff(tc.want, got); diff != "" { - t.Errorf("\n%s\nRenderRoles(...): -want, +got:\n%s\n", tc.reason, diff) - } - }) - } -} diff --git a/internal/controller/rbac/namespace/watch.go 
b/internal/controller/rbac/namespace/watch.go deleted file mode 100644 index 1cc802747..000000000 --- a/internal/controller/rbac/namespace/watch.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright 2020 The Crossplane Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package namespace - -import ( - "context" - "strings" - - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/util/workqueue" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -type adder interface { - Add(item any) -} - -// EnqueueRequestForNamespaces enqueues a reconcile for all namespaces whenever -// a ClusterRole with the aggregation labels we're concerned with changes. This -// is unusual, but we expect there to be relatively few ClusterRoles, and we -// have no way of relating a specific ClusterRoles back to the Roles that -// aggregate it. This is the approach the upstream aggregation controller uses. -// https://github.com/kubernetes/kubernetes/blob/323f348/pkg/controller/clusterroleaggregation/clusterroleaggregation_controller.go#L188 -type EnqueueRequestForNamespaces struct { - client client.Reader -} - -// Create adds a NamespacedName for the supplied CreateEvent if its Object is an -// aggregated ClusterRole. 
-func (e *EnqueueRequestForNamespaces) Create(ctx context.Context, evt event.CreateEvent, q workqueue.RateLimitingInterface) { - e.add(ctx, evt.Object, q) -} - -// Update adds a NamespacedName for the supplied UpdateEvent if its Object is an -// aggregated ClusterRole. -func (e *EnqueueRequestForNamespaces) Update(ctx context.Context, evt event.UpdateEvent, q workqueue.RateLimitingInterface) { - e.add(ctx, evt.ObjectOld, q) - e.add(ctx, evt.ObjectNew, q) -} - -// Delete adds a NamespacedName for the supplied DeleteEvent if its Object is an -// aggregated ClusterRole. -func (e *EnqueueRequestForNamespaces) Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.RateLimitingInterface) { - e.add(ctx, evt.Object, q) -} - -// Generic adds a NamespacedName for the supplied GenericEvent if its Object is -// an aggregated ClusterRole. -func (e *EnqueueRequestForNamespaces) Generic(ctx context.Context, evt event.GenericEvent, q workqueue.RateLimitingInterface) { - e.add(ctx, evt.Object, q) -} - -func (e *EnqueueRequestForNamespaces) add(ctx context.Context, obj runtime.Object, queue adder) { - cr, ok := obj.(*rbacv1.ClusterRole) - if !ok { - return - } - - if !aggregates(cr) { - return - } - - l := &corev1.NamespaceList{} - if err := e.client.List(ctx, l); err != nil { - return - } - - for _, ns := range l.Items { - queue.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: ns.GetName()}}) - } - -} - -func aggregates(obj metav1.Object) bool { - for k := range obj.GetLabels() { - if strings.HasPrefix(k, keyPrefix) { - return true - } - } - return false -} diff --git a/internal/controller/rbac/namespace/watch_test.go b/internal/controller/rbac/namespace/watch_test.go deleted file mode 100644 index 2872db74a..000000000 --- a/internal/controller/rbac/namespace/watch_test.go +++ /dev/null @@ -1,92 +0,0 @@ -/* -Copyright 2020 The Crossplane Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package namespace - -import ( - "context" - "testing" - - "github.com/google/go-cmp/cmp" - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "github.com/crossplane/crossplane-runtime/pkg/errors" - "github.com/crossplane/crossplane-runtime/pkg/test" -) - -var ( - _ handler.EventHandler = &EnqueueRequestForNamespaces{} -) - -type addFn func(item any) - -func (fn addFn) Add(item any) { - fn(item) -} - -func TestAdd(t *testing.T) { - name := "coolname" - - cases := map[string]struct { - client client.Reader - ctx context.Context - obj runtime.Object - queue adder - }{ - "ObjectIsNotAClusterRole": { - queue: addFn(func(_ any) { t.Errorf("queue.Add() called unexpectedly") }), - }, - "ClusterRoleIsNotAggregated": { - obj: &rbacv1.ClusterRole{}, - queue: addFn(func(_ any) { t.Errorf("queue.Add() called unexpectedly") }), - }, - "ListNamespacesError": { - client: &test.MockClient{ - MockList: test.NewMockListFn(errors.New("boom")), - }, - obj: &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{keyAggToAdmin: valTrue}}}, - queue: addFn(func(_ any) { t.Errorf("queue.Add() called unexpectedly") }), - }, - "SuccessfulEnqueue": { - client: &test.MockClient{ 
- MockList: test.NewMockListFn(nil, func(o client.ObjectList) error { - nsl := o.(*corev1.NamespaceList) - *nsl = corev1.NamespaceList{Items: []corev1.Namespace{{ObjectMeta: metav1.ObjectMeta{Name: name}}}} - return nil - }), - }, - obj: &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{keyAggToAdmin: valTrue}}}, - queue: addFn(func(got any) { - want := reconcile.Request{NamespacedName: types.NamespacedName{Name: name}} - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("-want, +got:\n%s\n", diff) - } - }), - }, - } - - for _, tc := range cases { - e := &EnqueueRequestForNamespaces{client: tc.client} - e.add(tc.ctx, tc.obj, tc.queue) - } -} diff --git a/internal/controller/rbac/provider/binding/reconciler.go b/internal/controller/rbac/provider/binding/reconciler.go index 0d6333413..ef3758521 100644 --- a/internal/controller/rbac/provider/binding/reconciler.go +++ b/internal/controller/rbac/provider/binding/reconciler.go @@ -134,8 +134,7 @@ type Reconciler struct { // Reconcile a ProviderRevision by creating a ClusterRoleBinding that binds a // provider's service account to its system ClusterRole. -func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocyclo // Reconcile methods are often very complex. Be wary. - +func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { log := r.log.WithValues("request", req) log.Debug("Reconciling") @@ -242,7 +241,9 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco // consider ClusterRoleBindings to be different if the subjects, the roleRefs, or the owner ref // is different. func ClusterRoleBindingsDiffer(current, desired runtime.Object) bool { - c := current.(*rbacv1.ClusterRoleBinding) - d := desired.(*rbacv1.ClusterRoleBinding) + // Calling this with anything but ClusterRoleBindings is a programming + // error. If it happens, we probably do want to panic. 
+ c := current.(*rbacv1.ClusterRoleBinding) //nolint:forcetypeassert // See above. + d := desired.(*rbacv1.ClusterRoleBinding) //nolint:forcetypeassert // See above. return !cmp.Equal(c.Subjects, d.Subjects) || !cmp.Equal(c.RoleRef, d.RoleRef) || !cmp.Equal(c.GetOwnerReferences(), d.GetOwnerReferences()) } diff --git a/internal/controller/rbac/provider/roles/fuzz_test.go b/internal/controller/rbac/provider/roles/fuzz_test.go index f09ec0234..337046074 100644 --- a/internal/controller/rbac/provider/roles/fuzz_test.go +++ b/internal/controller/rbac/provider/roles/fuzz_test.go @@ -25,7 +25,7 @@ import ( ) func FuzzRenderClusterRoles(f *testing.F) { - f.Fuzz(func(t *testing.T, data []byte) { + f.Fuzz(func(_ *testing.T, data []byte) { ff := fuzz.NewConsumer(data) pr := &v1.ProviderRevision{} ff.GenerateStruct(pr) diff --git a/internal/controller/rbac/provider/roles/reconciler.go b/internal/controller/rbac/provider/roles/reconciler.go index b2909b5f2..df35d3e91 100644 --- a/internal/controller/rbac/provider/roles/reconciler.go +++ b/internal/controller/rbac/provider/roles/reconciler.go @@ -115,7 +115,8 @@ func Setup(mgr ctrl.Manager, o controller.Options) error { wrh := &EnqueueRequestForAllRevisionsWithRequests{ client: mgr.GetClient(), - clusterRoleName: o.AllowClusterRole} + clusterRoleName: o.AllowClusterRole, + } sfh := &EnqueueRequestForAllRevisionsInFamily{ client: mgr.GetClient(), @@ -228,7 +229,7 @@ type Reconciler struct { // Reconcile a ProviderRevision by creating a series of opinionated ClusterRoles // that may be bound to allow access to the resources it defines. -func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocyclo // Slightly over (13). +func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { //nolint:gocognit // Slightly over (13). 
log := r.log.WithValues("request", req) log.Debug("Reconciling") @@ -332,7 +333,6 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco applied := make([]string, 0) for _, cr := range r.rbac.RenderClusterRoles(pr, resources) { - cr := cr // Pin range variable so we can take its address. log := log.WithValues("role-name", cr.GetName()) origRV := "" err := r.client.Apply(ctx, &cr, @@ -373,7 +373,6 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco func DefinedResources(refs []xpv1.TypedReference) []Resource { out := make([]Resource, 0, len(refs)) for _, ref := range refs { - // This would only return an error if the APIVersion contained more than // one "/". This should be impossible, but if it somehow happens we'll // just skip this resource since it can't be a CRD. @@ -399,8 +398,10 @@ func DefinedResources(refs []xpv1.TypedReference) []Resource { // ClusterRoles. We consider ClusterRoles to be different if their labels and // rules do not match. func ClusterRolesDiffer(current, desired runtime.Object) bool { - c := current.(*rbacv1.ClusterRole) - d := desired.(*rbacv1.ClusterRole) + // Calling this with anything but ClusterRoles is a programming error. If it + // happens, we probably do want to panic. + c := current.(*rbacv1.ClusterRole) //nolint:forcetypeassert // See above. + d := desired.(*rbacv1.ClusterRole) //nolint:forcetypeassert // See above. 
return !cmp.Equal(c.GetLabels(), d.GetLabels()) || !cmp.Equal(c.Rules, d.Rules) } diff --git a/internal/controller/rbac/provider/roles/reconciler_test.go b/internal/controller/rbac/provider/roles/reconciler_test.go index 80c530e26..2bcac3235 100644 --- a/internal/controller/rbac/provider/roles/reconciler_test.go +++ b/internal/controller/rbac/provider/roles/reconciler_test.go @@ -152,7 +152,7 @@ func TestReconcile(t *testing.T) { MockList: test.NewMockListFn(nil), }, }), - WithPermissionRequestsValidator(PermissionRequestsValidatorFn(func(ctx context.Context, requested ...rbacv1.PolicyRule) ([]Rule, error) { + WithPermissionRequestsValidator(PermissionRequestsValidatorFn(func(_ context.Context, _ ...rbacv1.PolicyRule) ([]Rule, error) { return nil, errBoom })), }, @@ -172,7 +172,7 @@ func TestReconcile(t *testing.T) { MockList: test.NewMockListFn(nil), }, }), - WithPermissionRequestsValidator(PermissionRequestsValidatorFn(func(ctx context.Context, requested ...rbacv1.PolicyRule) ([]Rule, error) { + WithPermissionRequestsValidator(PermissionRequestsValidatorFn(func(_ context.Context, _ ...rbacv1.PolicyRule) ([]Rule, error) { return []Rule{{}}, nil })), }, @@ -214,7 +214,7 @@ func TestReconcile(t *testing.T) { MockGet: test.NewMockGetFn(nil), MockList: test.NewMockListFn(nil), }, - Applicator: resource.ApplyFn(func(ctx context.Context, o client.Object, ao ...resource.ApplyOption) error { + Applicator: resource.ApplyFn(func(ctx context.Context, o client.Object, _ ...resource.ApplyOption) error { // Simulate a no-op change by not allowing the update. 
return resource.AllowUpdateIf(func(_, _ runtime.Object) bool { return false })(ctx, o, o) }), diff --git a/internal/controller/rbac/provider/roles/requests.go b/internal/controller/rbac/provider/roles/requests.go index 939a901d9..1ec1c0126 100644 --- a/internal/controller/rbac/provider/roles/requests.go +++ b/internal/controller/rbac/provider/roles/requests.go @@ -128,7 +128,7 @@ func (r Rule) path() path { } // Expand RBAC policy rules into our granular rules. -func Expand(ctx context.Context, rs ...rbacv1.PolicyRule) ([]Rule, error) { //nolint:gocyclo // Granular rules are inherently complex. +func Expand(ctx context.Context, rs ...rbacv1.PolicyRule) ([]Rule, error) { //nolint:gocognit // Granular rules are inherently complex. out := make([]Rule, 0, len(rs)) for _, r := range rs { for _, u := range r.NonResourceURLs { diff --git a/internal/controller/rbac/provider/roles/roles.go b/internal/controller/rbac/provider/roles/roles.go index e39bb92d1..f3a83c64b 100644 --- a/internal/controller/rbac/provider/roles/roles.go +++ b/internal/controller/rbac/provider/roles/roles.go @@ -51,6 +51,7 @@ const ( pluralLeases = "leases" ) +//nolint:gochecknoglobals // We treat these as constants. var ( verbsEdit = []string{rbacv1.VerbAll} verbsView = []string{"get", "list", "watch"} @@ -66,6 +67,8 @@ var ( // * ConfigMaps for leader election. // * Leases for leader election. // * Events for debugging. +// +//nolint:gochecknoglobals // We treat this as a constant. 
var rulesSystemExtra = []rbacv1.PolicyRule{ { APIGroups: []string{"", coordinationv1.GroupName}, diff --git a/internal/controller/rbac/provider/roles/watch_test.go b/internal/controller/rbac/provider/roles/watch_test.go index fd00154ea..d1abf1b89 100644 --- a/internal/controller/rbac/provider/roles/watch_test.go +++ b/internal/controller/rbac/provider/roles/watch_test.go @@ -35,9 +35,7 @@ import ( v1 "github.com/crossplane/crossplane/apis/pkg/v1" ) -var ( - _ handler.EventHandler = &EnqueueRequestForAllRevisionsWithRequests{} -) +var _ handler.EventHandler = &EnqueueRequestForAllRevisionsWithRequests{} type addFn func(item any) diff --git a/internal/controller/rbac/rbac.go b/internal/controller/rbac/rbac.go index ce6922cc3..3e73aca95 100644 --- a/internal/controller/rbac/rbac.go +++ b/internal/controller/rbac/rbac.go @@ -22,7 +22,6 @@ import ( "github.com/crossplane/crossplane/internal/controller/rbac/controller" "github.com/crossplane/crossplane/internal/controller/rbac/definition" - "github.com/crossplane/crossplane/internal/controller/rbac/namespace" "github.com/crossplane/crossplane/internal/controller/rbac/provider/binding" "github.com/crossplane/crossplane/internal/controller/rbac/provider/roles" ) @@ -38,10 +37,5 @@ func Setup(mgr ctrl.Manager, o controller.Options) error { return err } } - - if o.ManagementPolicy != controller.ManagementPolicyAll { - return nil - } - - return namespace.Setup(mgr, o) + return nil } diff --git a/internal/dag/dag.go b/internal/dag/dag.go index 37d3e2ac5..f89eb64d2 100644 --- a/internal/dag/dag.go +++ b/internal/dag/dag.go @@ -29,15 +29,15 @@ type Node interface { // Node implementations should be careful to establish uniqueness of // neighbors in their AddNeighbors method or risk counting a neighbor // multiple times. - AddNeighbors(...Node) error + AddNeighbors(ns ...Node) error } // DAG is a Directed Acyclic Graph. 
-type DAG interface { - Init(nodes []Node) ([]Node, error) - AddNode(Node) error - AddNodes(...Node) error - AddOrUpdateNodes(...Node) +type DAG interface { //nolint:interfacebloat // TODO(negz): Could this be several smaller interfaces? + Init(ns []Node) ([]Node, error) + AddNode(n Node) error + AddNodes(ns ...Node) error + AddOrUpdateNodes(ns ...Node) GetNode(identifier string) (Node, error) AddEdge(from string, to Node) (bool, error) AddEdges(edges map[string][]Node) ([]Node, error) diff --git a/internal/dag/dag_test.go b/internal/dag/dag_test.go index e4f16816e..8da0a2fb0 100644 --- a/internal/dag/dag_test.go +++ b/internal/dag/dag_test.go @@ -37,7 +37,6 @@ func (s *simpleNode) Neighbors() []Node { nodes := make([]Node, len(s.neighbors)) i := 0 for _, r := range s.neighbors { - r := r // Pin range variable so we can take its address. nodes[i] = &r i++ } @@ -58,14 +57,15 @@ func (s *simpleNode) AddNeighbors(nodes ...Node) error { func toNodes(n []simpleNode) []Node { nodes := make([]Node, len(n)) for i, r := range n { - r := r // Pin range variable so we can take its address. nodes[i] = &r } return nodes } -var _ DAG = &MapDag{} -var _ NewDAGFn = NewMapDag +var ( + _ DAG = &MapDag{} + _ NewDAGFn = NewMapDag +) func sortedFnNop([]simpleNode, []string) error { return nil diff --git a/internal/dag/fuzz_test.go b/internal/dag/fuzz_test.go index 5d7700ae8..a05fbc21e 100644 --- a/internal/dag/fuzz_test.go +++ b/internal/dag/fuzz_test.go @@ -31,7 +31,6 @@ type SimpleFuzzNode struct { func toNodesFuzz(n []SimpleFuzzNode) []Node { nodes := make([]Node, len(n)) for i, r := range n { - r := r // Pin range variable so we can take its address. nodes[i] = &r } return nodes @@ -59,7 +58,6 @@ func (s *SimpleFuzzNode) Neighbors() []Node { nodes := make([]Node, len(s.NeighborsField)) i := 0 for _, r := range s.NeighborsField { - r := r // Pin range variable so we can take its address. 
nodes[i] = &r i++ } @@ -67,7 +65,7 @@ func (s *SimpleFuzzNode) Neighbors() []Node { } func FuzzDag(f *testing.F) { - f.Fuzz(func(t *testing.T, data []byte) { + f.Fuzz(func(_ *testing.T, data []byte) { c := fuzz.NewConsumer(data) nodes := make([]SimpleFuzzNode, 0) err := c.CreateSlice(&nodes) diff --git a/internal/engine/cache.go b/internal/engine/cache.go new file mode 100644 index 000000000..c1ccca089 --- /dev/null +++ b/internal/engine/cache.go @@ -0,0 +1,189 @@ +/* +Copyright 2024 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "context" + "strings" + "sync" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + + "github.com/crossplane/crossplane-runtime/pkg/errors" +) + +var ( + _ cache.Cache = &InformerTrackingCache{} + _ TrackingInformers = &InformerTrackingCache{} +) + +// An InformerTrackingCache wraps a cache.Cache and keeps track of what GVKs it +// has started informers for. It takes a blocking lock whenever a new informer +// is started or stopped, but so does the standard controller-runtime Cache +// implementation. +type InformerTrackingCache struct { + // The wrapped cache. 
+ cache.Cache + + scheme *runtime.Scheme + + mx sync.RWMutex + active map[schema.GroupVersionKind]bool +} + +// TrackInformers wraps the supplied cache, adding a method to query which +// informers are active. +func TrackInformers(c cache.Cache, s *runtime.Scheme) *InformerTrackingCache { + return &InformerTrackingCache{ + Cache: c, + scheme: s, + active: make(map[schema.GroupVersionKind]bool), + } +} + +// ActiveInformers returns the GVKs of the informers believed to currently be +// active. The InformerTrackingCache considers an informer to become active when +// a caller calls Get, List, or one of the GetInformer methods. It considers an +// informer to become inactive when a caller calls the RemoveInformer method. +func (c *InformerTrackingCache) ActiveInformers() []schema.GroupVersionKind { + c.mx.RLock() + defer c.mx.RUnlock() + + out := make([]schema.GroupVersionKind, 0, len(c.active)) + for gvk := range c.active { + out = append(out, gvk) + } + return out +} + +// Get retrieves an obj for the given object key from the Kubernetes Cluster. +// obj must be a struct pointer so that obj can be updated with the response +// returned by the Server. +// +// Getting an object marks the informer for the object's GVK active. +func (c *InformerTrackingCache) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return errors.Wrap(err, "cannot determine group, version, and kind of supplied object") + } + + c.mx.RLock() + if _, active := c.active[gvk]; active { + defer c.mx.RUnlock() + return c.Cache.Get(ctx, key, obj, opts...) + } + c.mx.RUnlock() + + c.mx.Lock() + defer c.mx.Unlock() + c.active[gvk] = true + return c.Cache.Get(ctx, key, obj, opts...) +} + +// List retrieves list of objects for a given namespace and list options. On a +// successful call, Items field in the list will be populated with the result +// returned from the server. 
+// +// Listing objects marks the informer for the object's GVK active. +func (c *InformerTrackingCache) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + gvk, err := apiutil.GVKForObject(list, c.scheme) + if err != nil { + return errors.Wrap(err, "cannot determine group, version, and kind of supplied object") + } + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + + c.mx.RLock() + if _, active := c.active[gvk]; active { + defer c.mx.RUnlock() + return c.Cache.List(ctx, list, opts...) + } + c.mx.RUnlock() + + c.mx.Lock() + defer c.mx.Unlock() + c.active[gvk] = true + return c.Cache.List(ctx, list, opts...) +} + +// GetInformer fetches or constructs an informer for the given object that +// corresponds to a single API kind and resource. +// +// Getting an informer for an object marks the informer as active. +func (c *InformerTrackingCache) GetInformer(ctx context.Context, obj client.Object, opts ...cache.InformerGetOption) (cache.Informer, error) { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return nil, errors.Wrap(err, "cannot determine group, version, and kind of supplied object") + } + + c.mx.RLock() + if _, active := c.active[gvk]; active { + defer c.mx.RUnlock() + return c.Cache.GetInformer(ctx, obj, opts...) + } + c.mx.RUnlock() + + c.mx.Lock() + defer c.mx.Unlock() + c.active[gvk] = true + return c.Cache.GetInformer(ctx, obj, opts...) +} + +// GetInformerForKind is similar to GetInformer, except that it takes a +// group-version-kind, instead of the underlying object. +// +// Getting an informer marks the informer as active. +func (c *InformerTrackingCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind, opts ...cache.InformerGetOption) (cache.Informer, error) { + c.mx.RLock() + if _, active := c.active[gvk]; active { + defer c.mx.RUnlock() + return c.Cache.GetInformerForKind(ctx, gvk, opts...) 
+ } + c.mx.RUnlock() + + c.mx.Lock() + defer c.mx.Unlock() + c.active[gvk] = true + return c.Cache.GetInformerForKind(ctx, gvk, opts...) +} + +// RemoveInformer removes an informer entry and stops it if it was running. +// +// Removing an informer marks the informer as inactive. +func (c *InformerTrackingCache) RemoveInformer(ctx context.Context, obj client.Object) error { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return errors.Wrap(err, "cannot determine group, version, and kind of supplied object") + } + + c.mx.RLock() + if _, active := c.active[gvk]; !active { + // This should only happen if RemoveInformer is called for an informer + // that was never started. + defer c.mx.RUnlock() + return c.Cache.RemoveInformer(ctx, obj) + } + c.mx.RUnlock() + + c.mx.Lock() + defer c.mx.Unlock() + delete(c.active, gvk) + return c.Cache.RemoveInformer(ctx, obj) +} diff --git a/internal/engine/cache_test.go b/internal/engine/cache_test.go new file mode 100644 index 000000000..8fac54e60 --- /dev/null +++ b/internal/engine/cache_test.go @@ -0,0 +1,166 @@ +/* +Copyright 2024 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package engine + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ cache.Cache = &MockCache{} + +type MockCache struct { + cache.Cache + + MockGet func(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error + MockList func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error + MockGetInformer func(ctx context.Context, obj client.Object, opts ...cache.InformerGetOption) (cache.Informer, error) + MockGetInformerForKind func(ctx context.Context, gvk schema.GroupVersionKind, opts ...cache.InformerGetOption) (cache.Informer, error) + MockRemoveInformer func(ctx context.Context, obj client.Object) error +} + +func (m *MockCache) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + return m.MockGet(ctx, key, obj, opts...) +} + +func (m *MockCache) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + return m.MockList(ctx, list, opts...) +} + +func (m *MockCache) GetInformer(ctx context.Context, obj client.Object, opts ...cache.InformerGetOption) (cache.Informer, error) { + return m.MockGetInformer(ctx, obj, opts...) +} + +func (m *MockCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind, opts ...cache.InformerGetOption) (cache.Informer, error) { + return m.MockGetInformerForKind(ctx, gvk, opts...) 
+} + +func (m *MockCache) RemoveInformer(ctx context.Context, obj client.Object) error { + return m.MockRemoveInformer(ctx, obj) +} + +func TestActiveInformers(t *testing.T) { + c := &MockCache{ + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object, _ ...client.GetOption) error { + return nil + }, + MockList: func(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error { + return nil + }, + MockGetInformer: func(_ context.Context, _ client.Object, _ ...cache.InformerGetOption) (cache.Informer, error) { + return nil, nil + }, + MockGetInformerForKind: func(_ context.Context, _ schema.GroupVersionKind, _ ...cache.InformerGetOption) (cache.Informer, error) { + return nil, nil + }, + MockRemoveInformer: func(_ context.Context, _ client.Object) error { return nil }, + } + + itc := TrackInformers(c, runtime.NewScheme()) + + ctx := context.Background() + + // We intentionally call methods twice to cover the code paths where we + // don't start tracking an informer because we already track it (and vice + // versa for remove). + + // Get a GVK + get := &unstructured.Unstructured{} + get.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "test.crossplane.io", + Version: "v1", + Kind: "Get", + }) + _ = itc.Get(ctx, client.ObjectKeyFromObject(get), get) + _ = itc.Get(ctx, client.ObjectKeyFromObject(get), get) + + // List a GVK + list := &unstructured.UnstructuredList{} + list.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "test.crossplane.io", + Version: "v1", + Kind: "ListList", // It's a list list! 
+ }) + _ = itc.List(ctx, list) + _ = itc.List(ctx, list) + + // Get an informer + getinf := &unstructured.Unstructured{} + getinf.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "test.crossplane.io", + Version: "v1", + Kind: "GetInformer", + }) + _, _ = itc.GetInformer(ctx, getinf) + _, _ = itc.GetInformer(ctx, getinf) + + // Get an informer by GVK + getgvk := schema.GroupVersionKind{ + Group: "test.crossplane.io", + Version: "v1", + Kind: "GetInformerForKind", + } + _, _ = itc.GetInformerForKind(ctx, getgvk) + _, _ = itc.GetInformerForKind(ctx, getgvk) + + // Get a GVK, then remove its informer. + remove := &unstructured.Unstructured{} + remove.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "test.crossplane.io", + Version: "v1", + Kind: "RemoveMe", + }) + _ = itc.Get(ctx, client.ObjectKeyFromObject(remove), remove) + _ = itc.RemoveInformer(ctx, remove) + _ = itc.RemoveInformer(ctx, remove) + + want := []schema.GroupVersionKind{ + { + Group: "test.crossplane.io", + Version: "v1", + Kind: "Get", + }, + { + Group: "test.crossplane.io", + Version: "v1", + Kind: "List", + }, + { + Group: "test.crossplane.io", + Version: "v1", + Kind: "GetInformer", + }, + { + Group: "test.crossplane.io", + Version: "v1", + Kind: "GetInformerForKind", + }, + } + + got := itc.ActiveInformers() + if diff := cmp.Diff(want, got, cmpopts.SortSlices(func(a, b schema.GroupVersionKind) bool { return a.String() > b.String() })); diff != "" { + t.Errorf("\nitc.ActiveInformers(...): -want, +got:\n%s", diff) + } +} diff --git a/internal/engine/engine.go b/internal/engine/engine.go new file mode 100644 index 000000000..f7688ab2a --- /dev/null +++ b/internal/engine/engine.go @@ -0,0 +1,539 @@ +/* +Copyright 2024 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package engine manages the lifecycle of a set of controllers. +package engine + +import ( + "context" + "sync" + "time" + + extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + kcache "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + kcontroller "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + "github.com/crossplane/crossplane-runtime/pkg/errors" + "github.com/crossplane/crossplane-runtime/pkg/logging" +) + +// A ControllerEngine manages a set of controllers that can be dynamically +// started and stopped. It also manages a dynamic set of watches per controller, +// and the informers that back them. +type ControllerEngine struct { + // The manager of this engine's controllers. Controllers managed by the + // engine use the engine's client and cache, not the manager's. + mgr manager.Manager + + // The engine must have exclusive use of these informers. All controllers + // managed by the engine should use these informers. + infs TrackingInformers + + // The client used by the engine's controllers. The client must be backed by + // the above TrackingInformers. + client client.Client + + log logging.Logger + + // Protects everything below. 
+ mx sync.RWMutex + + // Running controllers, by name. + controllers map[string]*controller +} + +// TrackingInformers is a set of Informers. It tracks which are active. +type TrackingInformers interface { + cache.Informers + ActiveInformers() []schema.GroupVersionKind +} + +// New creates a new controller engine. +func New(mgr manager.Manager, infs TrackingInformers, c client.Client, o ...ControllerEngineOption) *ControllerEngine { + e := &ControllerEngine{ + mgr: mgr, + infs: infs, + client: c, + log: logging.NewNopLogger(), + controllers: make(map[string]*controller), + } + + for _, fn := range o { + fn(e) + } + + return e +} + +// An ControllerEngineOption configures a controller engine. +type ControllerEngineOption func(*ControllerEngine) + +// WithLogger configures an Engine to use a logger. +func WithLogger(l logging.Logger) ControllerEngineOption { + return func(e *ControllerEngine) { + e.log = l + } +} + +type controller struct { + // The running controller. + ctrl kcontroller.Controller + + // Called to stop the controller. + cancel context.CancelFunc + + // Protects the below map. + mx sync.RWMutex + + // The controller's sources, by watched GVK. + sources map[WatchID]*StoppableSource +} + +// A WatchGarbageCollector periodically garbage collects watches. +type WatchGarbageCollector interface { + GarbageCollectWatches(ctx context.Context, interval time.Duration) +} + +// A NewControllerFn can start a new controller-runtime controller. +type NewControllerFn func(name string, mgr manager.Manager, options kcontroller.Options) (kcontroller.Controller, error) + +// ControllerOptions configure a controller. +type ControllerOptions struct { + runtime kcontroller.Options + nc NewControllerFn + gc WatchGarbageCollector +} + +// A ControllerOption configures a controller. +type ControllerOption func(o *ControllerOptions) + +// WithRuntimeOptions configures the underlying controller-runtime controller. 
+func WithRuntimeOptions(ko kcontroller.Options) ControllerOption { + return func(o *ControllerOptions) { + o.runtime = ko + } +} + +// WithWatchGarbageCollector specifies an optional garbage collector this +// controller should use to remove unused watches. +func WithWatchGarbageCollector(gc WatchGarbageCollector) ControllerOption { + return func(o *ControllerOptions) { + o.gc = gc + } +} + +// WithNewControllerFn configures how the engine starts a new controller-runtime +// controller. +func WithNewControllerFn(fn NewControllerFn) ControllerOption { + return func(o *ControllerOptions) { + o.nc = fn + } +} + +// GetClient gets a client backed by the controller engine's cache. +func (e *ControllerEngine) GetClient() client.Client { + return e.client +} + +// GetFieldIndexer returns a FieldIndexer that can be used to add indexes to the +// controller engine's cache. +func (e *ControllerEngine) GetFieldIndexer() client.FieldIndexer { + return e.infs +} + +// Start a new controller. +func (e *ControllerEngine) Start(name string, o ...ControllerOption) error { + e.mx.Lock() + defer e.mx.Unlock() + + // Start is a no-op if the controller is already running. + if _, running := e.controllers[name]; running { + return nil + } + + co := &ControllerOptions{nc: kcontroller.NewUnmanaged} + for _, fn := range o { + fn(co) + } + + c, err := co.nc(name, e.mgr, co.runtime) + if err != nil { + return errors.Wrap(err, "cannot create new controller") + } + + // The caller will usually be a reconcile method. We want the controller + // to keep running when the reconcile ends, so we create a new context + // instead of taking one as an argument. + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + // Don't start the controller until the manager is elected. + <-e.mgr.Elected() + + e.log.Debug("Starting new controller", "controller", name) + + // Run the controller until its context is cancelled. 
+		if err := c.Start(ctx); err != nil {
+			e.log.Info("Controller stopped with an error", "name", name, "error", err)
+
+			// Make a best effort attempt to cleanup the controller so that
+			// IsRunning will return false.
+			_ = e.Stop(ctx, name)
+			return
+		}
+
+		e.log.Debug("Stopped controller", "controller", name)
+	}()
+
+	if co.gc != nil {
+		go func() {
+			// Don't start the garbage collector until the manager is elected.
+			<-e.mgr.Elected()
+
+			e.log.Debug("Starting watch garbage collector for controller", "controller", name)
+
+			// Run the collector every minute until its context is cancelled.
+			co.gc.GarbageCollectWatches(ctx, 1*time.Minute)
+
+			e.log.Debug("Stopped watch garbage collector for controller", "controller", name)
+		}()
+	}
+
+	r := &controller{
+		ctrl:    c,
+		cancel:  cancel,
+		sources: make(map[WatchID]*StoppableSource),
+	}
+
+	e.controllers[name] = r
+
+	return nil
+}
+
+// Stop a controller.
+func (e *ControllerEngine) Stop(ctx context.Context, name string) error {
+	e.mx.Lock()
+	defer e.mx.Unlock()
+
+	c, running := e.controllers[name]
+
+	// Stop is a no-op if the controller isn't running.
+	if !running {
+		return nil
+	}
+
+	c.mx.Lock()
+	defer c.mx.Unlock()
+
+	// Stop the controller's watches.
+	for wid, w := range c.sources {
+		if err := w.Stop(ctx); err != nil {
+			// c.mx is released by the deferred Unlock above; unlocking here too would panic.
+			return errors.Wrapf(err, "cannot stop %q watch for %q", wid.Type, wid.GVK)
+		}
+		delete(c.sources, wid)
+		e.log.Debug("Stopped watching GVK", "controller", name, "watch-type", wid.Type, "watched-gvk", wid.GVK)
+	}
+
+	// Stop and delete the controller.
+	c.cancel()
+	delete(e.controllers, name)
+
+	e.log.Debug("Stopped controller", "controller", name)
+	return nil
+}
+
+// IsRunning returns true if the named controller is running.
+func (e *ControllerEngine) IsRunning(name string) bool {
+	e.mx.RLock()
+	defer e.mx.RUnlock()
+	_, running := e.controllers[name]
+	return running
+}
+
+// A WatchType uniquely identifies a "type" of watch - i.e. 
a handler and a set +// of predicates. The controller engine uniquely identifies a Watch by its +// (kind, watch type) tuple. The engine will only start one watch of each (kind, +// watch type) tuple. To watch the same kind of resource multiple times, use +// different watch types. +type WatchType string + +// Common watch types. +const ( + WatchTypeClaim WatchType = "Claim" + WatchTypeCompositeResource WatchType = "CompositeResource" + WatchTypeComposedResource WatchType = "ComposedResource" + WatchTypeCompositionRevision WatchType = "CompositionRevision" +) + +// Watch an object. +type Watch struct { + wt WatchType + kind client.Object + handler handler.EventHandler + predicates []predicate.Predicate +} + +// A WatchID uniquely identifies a watch. +type WatchID struct { + Type WatchType + GVK schema.GroupVersionKind +} + +// WatchFor returns a Watch for the supplied kind of object. Events will be +// handled by the supplied EventHandler, and may be filtered by the supplied +// predicates. +func WatchFor(kind client.Object, wt WatchType, h handler.EventHandler, p ...predicate.Predicate) Watch { + return Watch{kind: kind, wt: wt, handler: h, predicates: p} +} + +// StartWatches instructs the named controller to start the supplied watches. +// The controller will only start a watch if it's not already watching the type +// of object specified by the supplied Watch. StartWatches blocks other +// operations on the same controller if and when it starts a watch. +func (e *ControllerEngine) StartWatches(name string, ws ...Watch) error { + e.mx.RLock() + c, running := e.controllers[name] + e.mx.RUnlock() + + if !running { + return errors.Errorf("controller %q is not running", name) + } + + // Make sure we can get GVKs for all the watches before we take locks. 
+ gvks := make([]schema.GroupVersionKind, len(ws)) + for i := range ws { + gvk, err := apiutil.GVKForObject(ws[i].kind, e.mgr.GetScheme()) + if err != nil { + return errors.Wrapf(err, "cannot determine group, version, and kind for %T", ws[i].kind) + } + gvks[i] = gvk + } + + // It's possible that we didn't explicitly stop a watch, but its backing + // informer was removed. This implicitly stops the watch by deleting its + // backing listener. If a watch exists but doesn't have an active informer, + // we want to restart the watch (and, implicitly, the informer). + // + // There's a potential race here. Another Goroutine could remove an informer + // between where we build the map and where we read it to check whether an + // informer is active. We wouldn't start a watch when we should. If the + // controller calls StartWatches repeatedly (e.g. an XR controller) this + // will eventually self-correct. + a := e.infs.ActiveInformers() + activeInformer := make(map[schema.GroupVersionKind]bool, len(a)) + for _, gvk := range a { + activeInformer[gvk] = true + } + + // Some controllers will call StartWatches on every reconcile. Most calls + // won't actually need to start a new watch. For example an XR controller + // would only need to start a new watch if an XR composed a new kind of + // resource that no other XR it controls already composes. So, we try to + // avoid taking a write lock and blocking all reconciles unless we need to. + c.mx.RLock() + start := false + for i, w := range ws { + wid := WatchID{Type: w.wt, GVK: gvks[i]} + // We've already created this watch and the informer backing it is still + // running. We don't need to create a new watch. + if _, watchExists := c.sources[wid]; watchExists && activeInformer[wid.GVK] { + e.log.Debug("Watch exists for GVK, not starting a new one", "controller", name, "watch-type", wid.Type, "watched-gvk", wid.GVK) + continue + } + // There's at least one watch to start. 
+ start = true + break + } + c.mx.RUnlock() + + // Nothing to start. + if !start { + return nil + } + + // We have at least one watch to start - take the write lock. It's possible + // another Goroutine updated this controller's watches since we released the + // read lock, so we compute everything again. + c.mx.Lock() + defer c.mx.Unlock() + + // Start new sources. + for i, w := range ws { + wid := WatchID{Type: w.wt, GVK: gvks[i]} + + // We've already created this watch and the informer backing it is still + // running. We don't need to create a new watch. We don't debug log this + // one - we'll have logged it above unless the watch was added between + // releasing the read lock and taking the write lock. + if _, watchExists := c.sources[wid]; watchExists && activeInformer[wid.GVK] { + continue + } + + // The controller's Watch method just calls the StoppableSource's Start + // method, passing in its private work queue as an argument. This will + // start an informer for the watched kind if there isn't one running + // already. + // + // The watch will stop sending events when either the source is stopped, + // or its backing informer is stopped. The controller's work queue will + // stop processing events when the controller is stopped. + src := NewStoppableSource(e.infs, w.kind, w.handler, w.predicates...) + if err := c.ctrl.Watch(src); err != nil { + return errors.Wrapf(err, "cannot start %q watch for %q", wid.Type, wid.GVK) + } + + // Record that we're now running this source. + c.sources[wid] = src + + e.log.Debug("Started watching GVK", "controller", name, "watch-type", wid.Type, "watched-gvk", wid.GVK) + } + + return nil +} + +// GetWatches returns the active watches for the supplied controller. 
+func (e *ControllerEngine) GetWatches(name string) ([]WatchID, error) { + e.mx.RLock() + c, running := e.controllers[name] + e.mx.RUnlock() + + if !running { + return nil, errors.Errorf("controller %q is not running", name) + } + + c.mx.RLock() + defer c.mx.RUnlock() + + out := make([]WatchID, 0, len(c.sources)) + for wid := range c.sources { + out = append(out, wid) + } + return out, nil +} + +// StopWatches stops the supplied watches. StopWatches blocks other operations +// on the same controller if and when it stops a watch. It returns the number of +// watches that it successfully stopped. +func (e *ControllerEngine) StopWatches(ctx context.Context, name string, ws ...WatchID) (int, error) { + e.mx.RLock() + c, running := e.controllers[name] + e.mx.RUnlock() + + if !running { + return 0, errors.Errorf("controller %q is not running", name) + } + + // Don't take the write lock if we want to keep all watches. + c.mx.RLock() + stop := false + for _, wid := range ws { + if _, watchExists := c.sources[wid]; watchExists { + stop = true + break + } + } + c.mx.RUnlock() + + if !stop { + return 0, nil + } + + // We have at least one watch to stop - take the write lock. It's possible + // another Goroutine updated this controller's watches since we released the + // read lock, so we compute everything again. + c.mx.Lock() + defer c.mx.Unlock() + + stopped := 0 + for _, wid := range ws { + w, watchExists := c.sources[wid] + if !watchExists { + continue + } + if err := w.Stop(ctx); err != nil { + return stopped, errors.Wrapf(err, "cannot stop %q watch for %q", wid.Type, wid.GVK) + } + delete(c.sources, wid) + e.log.Debug("Stopped watching GVK", "controller", name, "watch-type", wid.Type, "watched-gvk", wid.GVK) + stopped++ + } + + return stopped, nil +} + +// GarbageCollectCustomResourceInformers garbage collects informers for custom +// resources (e.g. Crossplane XRs, claims and composed resources) when the CRD +// that defines them is deleted. 
The garbage collector runs until the supplied +// context is cancelled. +func (e *ControllerEngine) GarbageCollectCustomResourceInformers(ctx context.Context) error { + i, err := e.infs.GetInformer(ctx, &extv1.CustomResourceDefinition{}) + if err != nil { + return errors.Wrap(err, "cannot get informer for CustomResourceDefinitions") + } + + h, err := i.AddEventHandler(kcache.ResourceEventHandlerFuncs{ + DeleteFunc: func(obj interface{}) { + o := obj + if fsu, ok := obj.(kcache.DeletedFinalStateUnknown); ok { + o = fsu.Obj + } + crd, ok := o.(*extv1.CustomResourceDefinition) + if !ok { + // This should never happen. + return + } + + for _, v := range crd.Spec.Versions { + gvk := schema.GroupVersionKind{ + Group: crd.Spec.Group, + Kind: crd.Spec.Names.Kind, + Version: v.Name, + } + + u := &unstructured.Unstructured{} + u.SetGroupVersionKind(gvk) + + if err := e.infs.RemoveInformer(ctx, u); err != nil { + e.log.Info("Cannot remove informer for type defined by deleted CustomResourceDefinition", "crd", crd.GetName(), "gvk", gvk) + continue + } + + e.log.Debug("Removed informer for type defined by deleted CustomResourceDefinition", "crd", crd.GetName(), "gvk", gvk) + } + }, + }) + if err != nil { + return errors.Wrap(err, "cannot add garbage collector event handler to CustomResourceDefinition informer") + } + + go func() { + <-ctx.Done() + if err := i.RemoveEventHandler(h); err != nil { + e.log.Info("Cannot remove garbage collector event handler from CustomResourceDefinition informer") + } + }() + + return nil +} diff --git a/internal/engine/engine_test.go b/internal/engine/engine_test.go new file mode 100644 index 000000000..62ca8b463 --- /dev/null +++ b/internal/engine/engine_test.go @@ -0,0 +1,897 @@ +/* +Copyright 2020 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "context" + "errors" + "fmt" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + kcontroller "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +var _ TrackingInformers = &MockTrackingInformers{} + +type MockTrackingInformers struct { + cache.Informers + + MockActiveInformers func() []schema.GroupVersionKind + MockGetInformer func(ctx context.Context, obj client.Object, opts ...cache.InformerGetOption) (cache.Informer, error) + MockRemoveInformer func(ctx context.Context, obj client.Object) error +} + +func (m *MockTrackingInformers) ActiveInformers() []schema.GroupVersionKind { + return m.MockActiveInformers() +} + +func (m *MockTrackingInformers) GetInformer(ctx context.Context, obj client.Object, opts ...cache.InformerGetOption) (cache.Informer, error) { + return m.MockGetInformer(ctx, obj, opts...) 
+} + +func (m *MockTrackingInformers) RemoveInformer(ctx context.Context, obj client.Object) error { + return m.MockRemoveInformer(ctx, obj) +} + +var _ manager.Manager = &MockManager{} + +type MockManager struct { + manager.Manager + + MockElected func() <-chan struct{} + MockGetScheme func() *runtime.Scheme +} + +func (m *MockManager) Elected() <-chan struct{} { + return m.MockElected() +} + +func (m *MockManager) GetScheme() *runtime.Scheme { + return m.MockGetScheme() +} + +var _ WatchGarbageCollector = &MockWatchGarbageCollector{} + +type MockWatchGarbageCollector struct { + MockGarbageCollectWatches func(ctx context.Context, interval time.Duration) +} + +func (m *MockWatchGarbageCollector) GarbageCollectWatches(ctx context.Context, interval time.Duration) { + m.MockGarbageCollectWatches(ctx, interval) +} + +var _ kcontroller.Controller = &MockController{} + +type MockController struct { + kcontroller.Controller + + MockStart func(ctx context.Context) error + MockWatch func(src source.Source) error +} + +func (m *MockController) Start(ctx context.Context) error { + return m.MockStart(ctx) +} + +func (m *MockController) Watch(src source.Source) error { + return m.MockWatch(src) +} + +func TestStartController(t *testing.T) { + type params struct { + mgr manager.Manager + infs TrackingInformers + c client.Client + opts []ControllerEngineOption + } + type args struct { + name string + opts []ControllerOption + } + type want struct { + err error + } + cases := map[string]struct { + reason string + params params + args args + want want + }{ + "NewControllerError": { + reason: "Start should return an error if it can't create a new controller", + params: params{ + mgr: &MockManager{ + MockElected: func() <-chan struct{} { + e := make(chan struct{}) + close(e) + return e + }, + }, + infs: &MockTrackingInformers{}, + }, + args: args{ + name: "cool-controller", + opts: []ControllerOption{ + WithNewControllerFn(func(_ string, _ manager.Manager, _ kcontroller.Options) 
(kcontroller.Controller, error) { + return nil, errors.New("boom") + }), + }, + }, + want: want{ + err: cmpopts.AnyError, + }, + }, + "StartControllerError": { + reason: "Start won't return an error if it can't start the new controller in a goroutine.", + params: params{ + mgr: &MockManager{ + MockElected: func() <-chan struct{} { + e := make(chan struct{}) + close(e) + return e + }, + }, + infs: &MockTrackingInformers{}, + }, + args: args{ + name: "cool-controller", + opts: []ControllerOption{ + WithNewControllerFn(func(_ string, _ manager.Manager, _ kcontroller.Options) (kcontroller.Controller, error) { + return &MockController{ + MockStart: func(_ context.Context) error { + return errors.New("boom") + }, + }, nil + }), + }, + }, + // TODO(negz): Test that the error was logged? We usually don't. + want: want{ + err: nil, + }, + }, + "SuccessfulStart": { + reason: "It should be possible to successfully start a controller.", + params: params{ + mgr: &MockManager{ + MockElected: func() <-chan struct{} { + e := make(chan struct{}) + close(e) + return e + }, + }, + infs: &MockTrackingInformers{}, + }, + args: args{ + name: "cool-controller", + opts: []ControllerOption{ + WithNewControllerFn(func(_ string, _ manager.Manager, _ kcontroller.Options) (kcontroller.Controller, error) { + return &MockController{ + MockStart: func(ctx context.Context) error { + <-ctx.Done() + return nil + }, + }, nil + }), + WithWatchGarbageCollector(&MockWatchGarbageCollector{ + MockGarbageCollectWatches: func(ctx context.Context, _ time.Duration) { + <-ctx.Done() + }, + }), + }, + }, + want: want{ + err: nil, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + e := New(tc.params.mgr, tc.params.infs, tc.params.c, tc.params.opts...) + err := e.Start(tc.args.name, tc.args.opts...) 
+ if diff := cmp.Diff(tc.want.err, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ne.Start(...): -want error, +got error:\n%s", tc.reason, diff) + } + + // Starting the controller second time should be a no-op. + err = e.Start(tc.args.name, tc.args.opts...) + if diff := cmp.Diff(tc.want.err, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ne.Start(...): -want error, +got error:\n%s", tc.reason, diff) + } + + // Stop the controller. Will be a no-op if it never started. + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + err = e.Stop(ctx, tc.args.name) + if diff := cmp.Diff(nil, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ne.Stop(...): -want error, +got error:\n%s", tc.reason, diff) + } + }) + } +} + +func TestIsRunning(t *testing.T) { + type params struct { + mgr manager.Manager + infs TrackingInformers + c client.Client + opts []ControllerEngineOption + } + + // We need to control how we start the controller. 
+ type argsStart struct { + name string + opts []ControllerOption + } + type args struct { + name string + } + type want struct { + running bool + } + cases := map[string]struct { + reason string + params params + argsStart argsStart + args args + want want + }{ + "SuccessfulStart": { + reason: "IsRunning should return true if the controller successfully starts.", + params: params{ + mgr: &MockManager{ + MockElected: func() <-chan struct{} { + e := make(chan struct{}) + close(e) + return e + }, + }, + infs: &MockTrackingInformers{}, + }, + argsStart: argsStart{ + name: "cool-controller", + opts: []ControllerOption{ + WithNewControllerFn(func(_ string, _ manager.Manager, _ kcontroller.Options) (kcontroller.Controller, error) { + return &MockController{ + MockStart: func(ctx context.Context) error { + <-ctx.Done() + return nil + }, + }, nil + }), + }, + }, + args: args{ + name: "cool-controller", + }, + want: want{ + running: true, + }, + }, + "StartControllerError": { + reason: "IsRunning should return false if the controller didn't successfully start.", + params: params{ + mgr: &MockManager{ + MockElected: func() <-chan struct{} { + e := make(chan struct{}) + close(e) + return e + }, + }, + infs: &MockTrackingInformers{}, + }, + argsStart: argsStart{ + name: "cool-controller", + opts: []ControllerOption{ + WithNewControllerFn(func(_ string, _ manager.Manager, _ kcontroller.Options) (kcontroller.Controller, error) { + return &MockController{ + MockStart: func(_ context.Context) error { + return errors.New("boom") + }, + }, nil + }), + }, + }, + args: args{ + name: "cool-controller", + }, + want: want{ + running: false, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + e := New(tc.params.mgr, tc.params.infs, tc.params.c, tc.params.opts...) + _ = e.Start(tc.args.name, tc.argsStart.opts...) + + // Give the start goroutine a little time to fail. 
+ time.Sleep(1 * time.Second) + + running := e.IsRunning(tc.args.name) + if diff := cmp.Diff(tc.want.running, running); diff != "" { + t.Errorf("\n%s\ne.IsRunning(...): -want, +got:\n%s", tc.reason, diff) + } + + // Stop the controller. Will be a no-op if it never started. + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + _ = e.Stop(ctx, tc.args.name) + + // IsRunning should always be false after the controller is stopped. + running = e.IsRunning(tc.args.name) + if diff := cmp.Diff(false, running); diff != "" { + t.Errorf("\n%s\ne.IsRunning(...): -want, +got:\n%s", tc.reason, diff) + } + }) + } +} + +func TestStopController(t *testing.T) { + type params struct { + mgr manager.Manager + infs TrackingInformers + c client.Client + opts []ControllerEngineOption + } + type args struct { + ctx context.Context + name string + } + type want struct { + err error + } + cases := map[string]struct { + reason string + params params + args args + want want + }{ + "SuccessfulStop": { + reason: "It should be possible to successfully stop a controller.", + params: params{ + mgr: &MockManager{ + MockElected: func() <-chan struct{} { + e := make(chan struct{}) + close(e) + return e + }, + MockGetScheme: runtime.NewScheme, + }, + infs: &MockTrackingInformers{ + MockActiveInformers: func() []schema.GroupVersionKind { + return nil + }, + }, + }, + args: args{ + ctx: context.Background(), + name: "cool-controller", + }, + want: want{ + err: nil, + }, + }, + // TODO(negz): Test handling watches that fail to stop? I'm not sure + // it's worth the amount of complexity making StoppableSource injectable + // would add. We could make Watch an interface with a GetSource. + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + e := New(tc.params.mgr, tc.params.infs, tc.params.c, tc.params.opts...) 
+ err := e.Start(tc.args.name, WithNewControllerFn(func(_ string, _ manager.Manager, _ kcontroller.Options) (kcontroller.Controller, error) { + return &MockController{ + MockStart: func(ctx context.Context) error { + <-ctx.Done() + return nil + }, + MockWatch: func(_ source.Source) error { + return nil + }, + }, nil + })) + if diff := cmp.Diff(nil, err, cmpopts.EquateErrors()); diff != "" { + t.Fatalf("\n%s\ne.Start(...): -want error, +got error:\n%s", tc.reason, diff) + } + + // Add a watch for stop to stop. + u := &unstructured.Unstructured{} + u.SetAPIVersion("test.crossplane.io/v1") + u.SetKind("Composed") + err = e.StartWatches(tc.args.name, WatchFor(u, WatchTypeComposedResource, nil)) + if diff := cmp.Diff(nil, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ne.StartWatches(...): -want error, +got error:\n%s", tc.reason, diff) + } + + // Stop the controller. Will be a no-op if it never started. + err = e.Stop(tc.args.ctx, tc.args.name) + if diff := cmp.Diff(tc.want.err, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ne.Stop(...): -want error, +got error:\n%s", tc.reason, diff) + } + + // Stop should be a no-op when called on a stopped controller. + err = e.Stop(tc.args.ctx, tc.args.name) + if diff := cmp.Diff(nil, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ne.Stop(...): -want error, +got error:\n%s", tc.reason, diff) + } + }) + } +} + +func TestStartWatches(t *testing.T) { + type params struct { + mgr manager.Manager + infs TrackingInformers + c client.Client + opts []ControllerEngineOption + } + // We need to control how we start the controller. 
+ type argsStart struct { + name string + opts []ControllerOption + } + type args struct { + name string + ws []Watch + } + type want struct { + err error + watches []WatchID + } + cases := map[string]struct { + reason string + params params + argsStart argsStart + args args + want want + }{ + "StartWatchError": { + reason: "StartWatches should return an error when a watch fails to start.", + params: params{ + mgr: &MockManager{ + MockElected: func() <-chan struct{} { + e := make(chan struct{}) + close(e) + return e + }, + MockGetScheme: runtime.NewScheme, + }, + infs: &MockTrackingInformers{ + MockActiveInformers: func() []schema.GroupVersionKind { + return []schema.GroupVersionKind{ + { + Group: "test.crossplane.io", + Version: "v1", + Kind: "Composed", + }, + } + }, + }, + }, + argsStart: argsStart{ + name: "cool-controller", + opts: []ControllerOption{ + WithNewControllerFn(func(_ string, _ manager.Manager, _ kcontroller.Options) (kcontroller.Controller, error) { + return &MockController{ + MockStart: func(ctx context.Context) error { + <-ctx.Done() + return nil + }, + MockWatch: func(_ source.Source) error { + return errors.New("boom") + }, + }, nil + }), + }, + }, + args: args{ + name: "cool-controller", + ws: []Watch{ + func() Watch { + u := &unstructured.Unstructured{} + u.SetAPIVersion("test.crossplane.io/v1") + u.SetKind("Composed") + return WatchFor(u, WatchTypeComposedResource, nil) + }(), + }, + }, + want: want{ + err: cmpopts.AnyError, + }, + }, + "SuccessfulStartWatches": { + reason: "StartWatches shouldn't return an error when all watches start successfully.", + params: params{ + mgr: &MockManager{ + MockElected: func() <-chan struct{} { + e := make(chan struct{}) + close(e) + return e + }, + MockGetScheme: runtime.NewScheme, + }, + infs: &MockTrackingInformers{ + MockActiveInformers: func() []schema.GroupVersionKind { + return []schema.GroupVersionKind{ + { + Group: "test.crossplane.io", + Version: "v1", + Kind: "Resource", + }, + } + }, + }, + }, 
+ argsStart: argsStart{ + name: "cool-controller", + opts: []ControllerOption{ + WithNewControllerFn(func(_ string, _ manager.Manager, _ kcontroller.Options) (kcontroller.Controller, error) { + return &MockController{ + MockStart: func(ctx context.Context) error { + <-ctx.Done() + return nil + }, + MockWatch: func(_ source.Source) error { + return nil + }, + }, nil + }), + }, + }, + args: args{ + name: "cool-controller", + ws: []Watch{ + func() Watch { + u := &unstructured.Unstructured{} + u.SetAPIVersion("test.crossplane.io/v1") + u.SetKind("Resource") + return WatchFor(u, WatchTypeComposedResource, nil) + }(), + // This should be deduplicated into the above watch. + func() Watch { + u := &unstructured.Unstructured{} + u.SetAPIVersion("test.crossplane.io/v1") + u.SetKind("Resource") + return WatchFor(u, WatchTypeComposedResource, nil) + }(), + // This shouldn't be deduplicated, because it's a different + // watch type. + func() Watch { + u := &unstructured.Unstructured{} + u.SetAPIVersion("test.crossplane.io/v1") + u.SetKind("Resource") + return WatchFor(u, WatchTypeCompositeResource, nil) + }(), + }, + }, + want: want{ + err: nil, + watches: []WatchID{ + { + Type: WatchTypeComposedResource, + GVK: schema.GroupVersionKind{ + Group: "test.crossplane.io", + Version: "v1", + Kind: "Resource", + }, + }, + { + Type: WatchTypeCompositeResource, + GVK: schema.GroupVersionKind{ + Group: "test.crossplane.io", + Version: "v1", + Kind: "Resource", + }, + }, + }, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + e := New(tc.params.mgr, tc.params.infs, tc.params.c, tc.params.opts...) + err := e.Start(tc.argsStart.name, tc.argsStart.opts...) + if diff := cmp.Diff(nil, err, cmpopts.EquateErrors()); diff != "" { + t.Fatalf("\n%s\ne.Start(...): -want error, +got error:\n%s", tc.reason, diff) + } + + err = e.StartWatches(tc.args.name, tc.args.ws...) 
+ if diff := cmp.Diff(tc.want.err, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ne.StartWatches(...): -want error, +got error:\n%s", tc.reason, diff) + } + + // Start the same watches again to exercise the code that ensures we + // only add each watch once. + err = e.StartWatches(tc.args.name, tc.args.ws...) + if diff := cmp.Diff(tc.want.err, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ne.StartWatches(...): -want error, +got error:\n%s", tc.reason, diff) + } + + watches, err := e.GetWatches(tc.args.name) + if diff := cmp.Diff(nil, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ne.GetWatches(...): -want error, +got error:\n%s", tc.reason, diff) + } + if diff := cmp.Diff(tc.want.watches, watches, + cmpopts.EquateEmpty(), + cmpopts.SortSlices(func(a, b WatchID) bool { return fmt.Sprintf("%s", a) > fmt.Sprintf("%s", b) }), + ); diff != "" { + t.Errorf("\n%s\ne.StartWatches(...): -want error, +got error:\n%s", tc.reason, diff) + } + + // Stop the controller. Will be a no-op if it never started. 
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + err = e.Stop(ctx, tc.args.name) + if diff := cmp.Diff(nil, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ne.Stop(...): -want error, +got error:\n%s", tc.reason, diff) + } + }) + } +} + +func TestStopWatches(t *testing.T) { + type params struct { + mgr manager.Manager + infs TrackingInformers + c client.Client + opts []ControllerEngineOption + } + type args struct { + ctx context.Context + name string + ws []WatchID + } + type want struct { + stopped int + err error + watches []WatchID + } + cases := map[string]struct { + reason string + params params + args args + want want + }{ + "NoWatchesToStop": { + reason: "StopWatches should be a no-op when there's no watches to stop.", + params: params{ + mgr: &MockManager{ + MockElected: func() <-chan struct{} { + e := make(chan struct{}) + close(e) + return e + }, + MockGetScheme: runtime.NewScheme, + }, + infs: &MockTrackingInformers{ + MockActiveInformers: func() []schema.GroupVersionKind { + return nil + }, + }, + }, + args: args{ + ctx: context.Background(), + name: "cool-controller", + ws: []WatchID{ + { + Type: WatchTypeCompositeResource, + GVK: schema.GroupVersionKind{ + Group: "test.crossplane.io", + Version: "v1", + Kind: "NeverStarted", + }, + }, + }, + }, + want: want{ + stopped: 0, + err: nil, + watches: []WatchID{ + { + Type: WatchTypeComposedResource, + GVK: schema.GroupVersionKind{ + Group: "test.crossplane.io", + Version: "v1", + Kind: "Resource", + }, + }, + { + Type: WatchTypeCompositeResource, + GVK: schema.GroupVersionKind{ + Group: "test.crossplane.io", + Version: "v1", + Kind: "Resource", + }, + }, + }, + }, + }, + "StopOneWatch": { + reason: "StopWatches should only stop the watches it's asked to.", + params: params{ + mgr: &MockManager{ + MockElected: func() <-chan struct{} { + e := make(chan struct{}) + close(e) + return e + }, + MockGetScheme: runtime.NewScheme, + }, + infs: 
&MockTrackingInformers{ + MockActiveInformers: func() []schema.GroupVersionKind { + return nil + }, + }, + }, + args: args{ + ctx: context.Background(), + name: "cool-controller", + ws: []WatchID{ + { + Type: WatchTypeComposedResource, + GVK: schema.GroupVersionKind{ + Group: "test.crossplane.io", + Version: "v1", + Kind: "Resource", + }, + }, + }, + }, + want: want{ + stopped: 1, + err: nil, + watches: []WatchID{ + { + Type: WatchTypeCompositeResource, + GVK: schema.GroupVersionKind{ + Group: "test.crossplane.io", + Version: "v1", + Kind: "Resource", + }, + }, + }, + }, + }, + "StopAllWatches": { + reason: "StopWatches should stop all watches when asked to.", + params: params{ + mgr: &MockManager{ + MockElected: func() <-chan struct{} { + e := make(chan struct{}) + close(e) + return e + }, + MockGetScheme: runtime.NewScheme, + }, + infs: &MockTrackingInformers{ + MockActiveInformers: func() []schema.GroupVersionKind { + return nil + }, + }, + }, + args: args{ + ctx: context.Background(), + name: "cool-controller", + ws: []WatchID{ + { + Type: WatchTypeComposedResource, + GVK: schema.GroupVersionKind{ + Group: "test.crossplane.io", + Version: "v1", + Kind: "Resource", + }, + }, + { + Type: WatchTypeCompositeResource, + GVK: schema.GroupVersionKind{ + Group: "test.crossplane.io", + Version: "v1", + Kind: "Resource", + }, + }, + { + Type: WatchTypeCompositeResource, + GVK: schema.GroupVersionKind{ + Group: "test.crossplane.io", + Version: "v1", + Kind: "NeverStarted", + }, + }, + }, + }, + want: want{ + stopped: 2, + err: nil, + watches: []WatchID{}, + }, + }, + // TODO(negz): Test handling watches that fail to stop? I'm not sure + // it's worth the amount of complexity making StoppableSource injectable + // would add. We could make Watch an interface with a GetSource. + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + e := New(tc.params.mgr, tc.params.infs, tc.params.c, tc.params.opts...) 
+ err := e.Start(tc.args.name, WithNewControllerFn(func(_ string, _ manager.Manager, _ kcontroller.Options) (kcontroller.Controller, error) { + return &MockController{ + MockStart: func(ctx context.Context) error { + <-ctx.Done() + return nil + }, + MockWatch: func(_ source.Source) error { + return nil + }, + }, nil + })) + if diff := cmp.Diff(nil, err, cmpopts.EquateErrors()); diff != "" { + t.Fatalf("\n%s\ne.Start(...): -want error, +got error:\n%s", tc.reason, diff) + } + + // Add some watches to stop. + u1 := &unstructured.Unstructured{} + u1.SetAPIVersion("test.crossplane.io/v1") + u1.SetKind("Resource") + err = e.StartWatches(tc.args.name, + WatchFor(u1, WatchTypeComposedResource, nil), + WatchFor(u1, WatchTypeCompositeResource, nil), + ) + if diff := cmp.Diff(nil, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ne.StartWatches(...): -want error, +got error:\n%s", tc.reason, diff) + } + + stopped, err := e.StopWatches(tc.args.ctx, tc.args.name, tc.args.ws...) + if diff := cmp.Diff(nil, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ne.StopWatches(...): -want error, +got error:\n%s", tc.reason, diff) + } + if diff := cmp.Diff(tc.want.stopped, stopped); diff != "" { + t.Errorf("\n%s\ne.StopWatches(...): -want stopped, +got stopped:\n%s", tc.reason, diff) + } + + watches, err := e.GetWatches(tc.args.name) + if diff := cmp.Diff(nil, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ne.GetWatches(...): -want error, +got error:\n%s", tc.reason, diff) + } + if diff := cmp.Diff(tc.want.watches, watches, + cmpopts.EquateEmpty(), + cmpopts.SortSlices(func(a, b WatchID) bool { return fmt.Sprintf("%s", a) > fmt.Sprintf("%s", b) }), + ); diff != "" { + t.Errorf("\n%s\ne.StartWatches(...): -want error, +got error:\n%s", tc.reason, diff) + } + + // Stop the controller. Will be a no-op if it never started. 
+ err = e.Stop(tc.args.ctx, tc.args.name) + if diff := cmp.Diff(nil, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ne.Stop(...): -want error, +got error:\n%s", tc.reason, diff) + } + }) + } +} diff --git a/internal/engine/source.go b/internal/engine/source.go new file mode 100644 index 000000000..e04aa3ca3 --- /dev/null +++ b/internal/engine/source.go @@ -0,0 +1,195 @@ +/* +Copyright 2024 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "context" + + kcache "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/source" + + "github.com/crossplane/crossplane-runtime/pkg/errors" +) + +var _ source.Source = &StoppableSource{} + +// NewStoppableSource returns a new watch source that can be stopped. +func NewStoppableSource(infs cache.Informers, t client.Object, h handler.EventHandler, ps ...predicate.Predicate) *StoppableSource { + return &StoppableSource{infs: infs, Type: t, handler: h, predicates: ps} +} + +// A StoppableSource is a controller-runtime watch source that can be stopped. 
+type StoppableSource struct { + infs cache.Informers + + Type client.Object + handler handler.EventHandler + predicates []predicate.Predicate + + reg kcache.ResourceEventHandlerRegistration +} + +// Start is internal and should be called only by the Controller to register +// an EventHandler with the Informer to enqueue reconcile.Requests. +func (s *StoppableSource) Start(ctx context.Context, q workqueue.RateLimitingInterface) error { + i, err := s.infs.GetInformer(ctx, s.Type, cache.BlockUntilSynced(true)) + if err != nil { + return errors.Wrapf(err, "cannot get informer for %T", s.Type) + } + + reg, err := i.AddEventHandler(NewEventHandler(ctx, q, s.handler, s.predicates...).HandlerFuncs()) + if err != nil { + return errors.Wrapf(err, "cannot add event handler") + } + s.reg = reg + + return nil +} + +// Stop removes the EventHandler from the source's Informer. The Informer will +// stop sending events to the source. +func (s *StoppableSource) Stop(ctx context.Context) error { + if s.reg == nil { + return nil + } + + i, err := s.infs.GetInformer(ctx, s.Type) + if err != nil { + return errors.Wrapf(err, "cannot get informer for %T", s.Type) + } + + if err := i.RemoveEventHandler(s.reg); err != nil { + return errors.Wrap(err, "cannot remove event handler") + } + + s.reg = nil + return nil +} + +// NewEventHandler creates a new EventHandler. +func NewEventHandler(ctx context.Context, q workqueue.RateLimitingInterface, h handler.EventHandler, ps ...predicate.Predicate) *EventHandler { + return &EventHandler{ + ctx: ctx, + handler: h, + queue: q, + predicates: ps, + } +} + +// An EventHandler converts a controller-runtime handler and predicates into a +// client-go ResourceEventHandler. It's a stripped down version of +// controller-runtime's internal implementation. 
+// https://github.com/kubernetes-sigs/controller-runtime/blob/v0.18.2/pkg/internal/source/event_handler.go#L35 +type EventHandler struct { + ctx context.Context //nolint:containedctx // Kept for compatibility with controller-runtime. + + handler handler.EventHandler + queue workqueue.RateLimitingInterface + predicates []predicate.Predicate +} + +// HandlerFuncs converts EventHandler to a ResourceEventHandlerFuncs. +func (e *EventHandler) HandlerFuncs() kcache.ResourceEventHandlerFuncs { + return kcache.ResourceEventHandlerFuncs{ + AddFunc: e.OnAdd, + UpdateFunc: e.OnUpdate, + DeleteFunc: e.OnDelete, + } +} + +// OnAdd creates CreateEvent and calls Create on EventHandler. +func (e *EventHandler) OnAdd(obj interface{}) { + o, ok := obj.(client.Object) + if !ok { + return + } + + c := event.CreateEvent{Object: o} + for _, p := range e.predicates { + if !p.Create(c) { + return + } + } + + ctx, cancel := context.WithCancel(e.ctx) + defer cancel() + e.handler.Create(ctx, c, e.queue) +} + +// OnUpdate creates UpdateEvent and calls Update on EventHandler. +func (e *EventHandler) OnUpdate(oldObj, newObj interface{}) { + o, ok := oldObj.(client.Object) + if !ok { + return + } + + n, ok := newObj.(client.Object) + if !ok { + return + } + + u := event.UpdateEvent{ObjectOld: o, ObjectNew: n} + + for _, p := range e.predicates { + if !p.Update(u) { + return + } + } + + ctx, cancel := context.WithCancel(e.ctx) + defer cancel() + e.handler.Update(ctx, u, e.queue) +} + +// OnDelete creates DeleteEvent and calls Delete on EventHandler. +func (e *EventHandler) OnDelete(obj interface{}) { + var d event.DeleteEvent + + switch o := obj.(type) { + case client.Object: + d = event.DeleteEvent{Object: o} + + // Deal with tombstone events by pulling the object out. Tombstone events + // wrap the object in a DeleteFinalStateUnknown struct, so the object needs + // to be pulled out. 
+ case kcache.DeletedFinalStateUnknown: + wrapped, ok := o.Obj.(client.Object) + if !ok { + return + } + d = event.DeleteEvent{DeleteStateUnknown: true, Object: wrapped} + + default: + return + } + + for _, p := range e.predicates { + if !p.Delete(d) { + return + } + } + + ctx, cancel := context.WithCancel(e.ctx) + defer cancel() + e.handler.Delete(ctx, d, e.queue) +} diff --git a/internal/engine/source_test.go b/internal/engine/source_test.go new file mode 100644 index 000000000..e855c4ad9 --- /dev/null +++ b/internal/engine/source_test.go @@ -0,0 +1,272 @@ +/* +Copyright 2020 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package engine + +import ( + "context" + "errors" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + kcache "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +var _ cache.Informer = &MockInformer{} + +type MockInformer struct { + cache.Informer + + MockAddEventHandler func(handler kcache.ResourceEventHandler) (kcache.ResourceEventHandlerRegistration, error) + MockRemoveEventHandler func(handle kcache.ResourceEventHandlerRegistration) error +} + +func (m *MockInformer) AddEventHandler(handler kcache.ResourceEventHandler) (kcache.ResourceEventHandlerRegistration, error) { + return m.MockAddEventHandler(handler) +} + +func (m *MockInformer) RemoveEventHandler(handle kcache.ResourceEventHandlerRegistration) error { + return m.MockRemoveEventHandler(handle) +} + +func TestStartSource(t *testing.T) { + type params struct { + infs cache.Informers + t client.Object + h handler.EventHandler + ps []predicate.Predicate + } + type args struct { + ctx context.Context + q workqueue.RateLimitingInterface + } + type want struct { + err error + } + + cases := map[string]struct { + reason string + params params + args args + want want + }{ + "GetInformerError": { + reason: "Start should return an error if it can't get an informer for the supplied type.", + params: params{ + infs: &MockCache{ + MockGetInformer: func(_ context.Context, _ client.Object, _ ...cache.InformerGetOption) (cache.Informer, error) { + return nil, errors.New("boom") + }, + }, + t: &unstructured.Unstructured{}, + }, + args: args{ + ctx: context.Background(), + q: nil, // Not called, just plumbed down to the event handler. 
+ }, + want: want{ + err: cmpopts.AnyError, + }, + }, + "AddEventHandlerError": { + reason: "Start should return an error if it can't add an event handler to the informer.", + params: params{ + infs: &MockCache{ + MockGetInformer: func(_ context.Context, _ client.Object, _ ...cache.InformerGetOption) (cache.Informer, error) { + return &MockInformer{ + MockAddEventHandler: func(_ kcache.ResourceEventHandler) (kcache.ResourceEventHandlerRegistration, error) { + return nil, errors.New("boom") + }, + }, nil + }, + }, + t: &unstructured.Unstructured{}, + }, + args: args{ + ctx: context.Background(), + q: nil, // Not called, just plumbed down to the event handler. + }, + want: want{ + err: cmpopts.AnyError, + }, + }, + "SuccessfulStart": { + reason: "Start should return nil if it successfully starts the source.", + params: params{ + infs: &MockCache{ + MockGetInformer: func(_ context.Context, _ client.Object, _ ...cache.InformerGetOption) (cache.Informer, error) { + return &MockInformer{ + MockAddEventHandler: func(_ kcache.ResourceEventHandler) (kcache.ResourceEventHandlerRegistration, error) { + return nil, nil + }, + }, nil + }, + }, + t: &unstructured.Unstructured{}, + }, + args: args{ + ctx: context.Background(), + q: nil, // Not called, just plumbed down to the event handler. + }, + want: want{ + err: nil, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + s := NewStoppableSource(tc.params.infs, tc.params.t, tc.params.h, tc.params.ps...) 
+ + err := s.Start(tc.args.ctx, tc.args.q) + if diff := cmp.Diff(tc.want.err, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ns.Start(...): -want error, +got error:\n%s", tc.reason, diff) + } + }) + } +} + +var _ kcache.ResourceEventHandlerRegistration = &MockRegistration{} + +type MockRegistration struct{} + +func (m *MockRegistration) HasSynced() bool { return true } + +func TestStopSource(t *testing.T) { + type params struct { + infs cache.Informers + t client.Object + h handler.EventHandler + ps []predicate.Predicate + } + type args struct { + ctx context.Context + q workqueue.RateLimitingInterface + } + type want struct { + err error + } + + // Used to return an error only when getting an informer to stop it. + started := false + + cases := map[string]struct { + reason string + params params + args args + want want + }{ + "SuccessfulStop": { + reason: "Stop should return nil if it successfully stops the source.", + params: params{ + infs: &MockCache{ + MockGetInformer: func(_ context.Context, _ client.Object, _ ...cache.InformerGetOption) (cache.Informer, error) { + return &MockInformer{ + MockAddEventHandler: func(_ kcache.ResourceEventHandler) (kcache.ResourceEventHandlerRegistration, error) { + return &MockRegistration{}, nil + }, + MockRemoveEventHandler: func(_ kcache.ResourceEventHandlerRegistration) error { + return nil + }, + }, nil + }, + }, + t: &unstructured.Unstructured{}, + }, + args: args{ + ctx: context.Background(), + q: nil, // Not called, just plumbed down to the event handler. 
+ }, + want: want{ + err: nil, + }, + }, + "GetInformerError": { + reason: "Stop should return an error if it can't get an informer.", + params: params{ + infs: &MockCache{ + MockGetInformer: func(_ context.Context, _ client.Object, _ ...cache.InformerGetOption) (cache.Informer, error) { + if !started { + started = true + return &MockInformer{ + MockAddEventHandler: func(_ kcache.ResourceEventHandler) (kcache.ResourceEventHandlerRegistration, error) { + return &MockRegistration{}, nil + }, + }, nil + } + return nil, errors.New("boom") + }, + }, + t: &unstructured.Unstructured{}, + }, + args: args{ + ctx: context.Background(), + q: nil, // Not called, just plumbed down to the event handler. + }, + want: want{ + err: cmpopts.AnyError, + }, + }, + "RemoveEventHandlerError": { + reason: "Stop should return an error if it can't remove the source's event handler.", + params: params{ + infs: &MockCache{ + MockGetInformer: func(_ context.Context, _ client.Object, _ ...cache.InformerGetOption) (cache.Informer, error) { + return &MockInformer{ + MockAddEventHandler: func(_ kcache.ResourceEventHandler) (kcache.ResourceEventHandlerRegistration, error) { + return &MockRegistration{}, nil + }, + MockRemoveEventHandler: func(_ kcache.ResourceEventHandlerRegistration) error { + return errors.New("boom") + }, + }, nil + }, + }, + t: &unstructured.Unstructured{}, + }, + args: args{ + ctx: context.Background(), + q: nil, // Not called, just plumbed down to the event handler. + }, + want: want{ + err: cmpopts.AnyError, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + s := NewStoppableSource(tc.params.infs, tc.params.t, tc.params.h, tc.params.ps...) 
+ + err := s.Start(tc.args.ctx, tc.args.q) + if diff := cmp.Diff(nil, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ns.Start(...): -want error, +got error:\n%s", tc.reason, diff) + } + + err = s.Stop(tc.args.ctx) + if diff := cmp.Diff(tc.want.err, err, cmpopts.EquateErrors()); diff != "" { + t.Errorf("\n%s\ns.Start(...): -want error, +got error:\n%s", tc.reason, diff) + } + }) + } +} diff --git a/internal/features/features.go b/internal/features/features.go index 19866bd12..f1514f19a 100644 --- a/internal/features/features.go +++ b/internal/features/features.go @@ -47,18 +47,8 @@ const ( EnableAlphaClaimSSA feature.Flag = "EnableAlphaClaimSSA" ) -// Beta Feature Flags +// Beta Feature Flags. const ( - // EnableBetaCompositionFunctions enables alpha support for composition - // functions. See the below design for more details. - // https://github.com/crossplane/crossplane/blob/863ff6/design/design-doc-composition-functions.md - EnableBetaCompositionFunctions feature.Flag = "EnableBetaCompositionFunctions" - - // EnableBetaCompositionFunctionsExtraResources enables extra resources support for - // composition functions. See the below design for more details. - // https://github.com/crossplane/crossplane/blob/863ff6/design/design-doc-composition-functions.md - EnableBetaCompositionFunctionsExtraResources feature.Flag = "EnableBetaCompositionFunctionsExtraResources" - // EnableBetaCompositionWebhookSchemaValidation enables alpha support for // composition webhook schema validation. See the below design for more // details. 
diff --git a/internal/initializer/cert_generator.go b/internal/initializer/cert_generator.go index 380b75be8..b162d345d 100644 --- a/internal/initializer/cert_generator.go +++ b/internal/initializer/cert_generator.go @@ -31,7 +31,7 @@ const ( errGenerateCertificate = "cannot generate tls certificate" ) -// CertificateSigner is the parent's certificate and key that will be used to sign the certificate +// CertificateSigner is the parent's certificate and key that will be used to sign the certificate. type CertificateSigner struct { certificate *x509.Certificate key *rsa.PrivateKey @@ -40,18 +40,16 @@ type CertificateSigner struct { // CertificateGenerator can return you TLS certificate valid for given domains. type CertificateGenerator interface { - Generate(*x509.Certificate, *CertificateSigner) (key []byte, crt []byte, err error) + Generate(c *x509.Certificate, cs *CertificateSigner) (key, crt []byte, err error) } -var ( - pkixName = pkix.Name{ - CommonName: "Crossplane", - Organization: []string{"Crossplane"}, - Country: []string{"Earth"}, - Province: []string{"Earth"}, - Locality: []string{"Earth"}, - } -) +var pkixName = pkix.Name{ //nolint:gochecknoglobals // We treat this as a constant. + CommonName: "Crossplane", + Organization: []string{"Crossplane"}, + Country: []string{"Earth"}, + Province: []string{"Earth"}, + Locality: []string{"Earth"}, +} // NewCertGenerator returns a new CertGenerator. func NewCertGenerator() *CertGenerator { diff --git a/internal/initializer/crds.go b/internal/initializer/crds.go index 4a7683c61..1754b4f5c 100644 --- a/internal/initializer/crds.go +++ b/internal/initializer/crds.go @@ -80,7 +80,7 @@ type CoreCRDs struct { } // Run applies all CRDs in the given directory. -func (c *CoreCRDs) Run(ctx context.Context, kube client.Client) error { //nolint:gocyclo // TODO(negz): Can anything be broken out (maybe the loop body)? 
+func (c *CoreCRDs) Run(ctx context.Context, kube client.Client) error { var caBundle []byte if c.WebhookTLSSecretRef != nil { s := &corev1.Secret{} diff --git a/internal/initializer/crds_migrator.go b/internal/initializer/crds_migrator.go index 0a7987dc8..4692b39c9 100644 --- a/internal/initializer/crds_migrator.go +++ b/internal/initializer/crds_migrator.go @@ -46,7 +46,7 @@ type CoreCRDsMigrator struct { } // Run applies all CRDs in the given directory. -func (c *CoreCRDsMigrator) Run(ctx context.Context, kube client.Client) error { //nolint:gocyclo // TODO(phisco) refactor +func (c *CoreCRDsMigrator) Run(ctx context.Context, kube client.Client) error { var crd extv1.CustomResourceDefinition if err := kube.Get(ctx, client.ObjectKey{Name: c.crdName}, &crd); err != nil { if kerrors.IsNotFound(err) { @@ -67,7 +67,7 @@ func (c *CoreCRDsMigrator) Run(ctx context.Context, kube client.Client) error { break } } - var resources = unstructured.UnstructuredList{} + resources := unstructured.UnstructuredList{} resources.SetGroupVersionKind(schema.GroupVersionKind{ Group: crd.Spec.Group, Version: storageVersion, diff --git a/internal/initializer/deployment_runtime_config_test.go b/internal/initializer/deployment_runtime_config_test.go index 2142fb03b..1402d750c 100644 --- a/internal/initializer/deployment_runtime_config_test.go +++ b/internal/initializer/deployment_runtime_config_test.go @@ -43,7 +43,7 @@ func TestDeploymentRuntimeConfigObject(t *testing.T) { "FailedToCreate": { args: args{ kube: &test.MockClient{ - MockCreate: func(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + MockCreate: func(_ context.Context, _ client.Object, _ ...client.CreateOption) error { return errBoom }, }, @@ -55,7 +55,7 @@ func TestDeploymentRuntimeConfigObject(t *testing.T) { "SuccessCreated": { args: args{ kube: &test.MockClient{ - MockCreate: func(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + MockCreate: func(_ context.Context, 
_ client.Object, _ ...client.CreateOption) error { return nil }, }, @@ -64,7 +64,7 @@ func TestDeploymentRuntimeConfigObject(t *testing.T) { "SuccessAlreadyExists": { args: args{ kube: &test.MockClient{ - MockCreate: func(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + MockCreate: func(_ context.Context, _ client.Object, _ ...client.CreateOption) error { return kerrors.NewAlreadyExists(schema.GroupResource{}, "default") }, }, diff --git a/internal/initializer/installer.go b/internal/initializer/installer.go index 18810e992..2242a8d4d 100644 --- a/internal/initializer/installer.go +++ b/internal/initializer/installer.go @@ -33,15 +33,17 @@ import ( const ( errListProviders = "failed getting provider list" errListConfigurations = "failed getting configuration list" + errListFunctions = "failed getting function list" errParsePackageName = "package name is not valid" errApplyPackage = "cannot apply package" ) // NewPackageInstaller returns a new package installer. -func NewPackageInstaller(p []string, c []string) *PackageInstaller { +func NewPackageInstaller(p []string, c []string, f []string) *PackageInstaller { return &PackageInstaller{ providers: p, configurations: c, + functions: f, } } @@ -49,12 +51,13 @@ func NewPackageInstaller(p []string, c []string) *PackageInstaller { type PackageInstaller struct { configurations []string providers []string + functions []string } // Run makes sure all specified packages exist. -func (pi *PackageInstaller) Run(ctx context.Context, kube client.Client) error { //nolint:gocyclo // TODO(negz): Could any of this be broken out? 
- pkgs := make([]client.Object, len(pi.providers)+len(pi.configurations)) - // NOTE(hasheddan): we build maps of existing Provider and Configuration +func (pi *PackageInstaller) Run(ctx context.Context, kube client.Client) error { + pkgs := make([]client.Object, len(pi.providers)+len(pi.configurations)+len(pi.functions)) + // NOTE(hasheddan): we build maps of existing Provider, Configuration and Function // sources to the package names such that we can update the version when a // package specified for install matches the source of an existing package. pl := &v1.ProviderList{} @@ -86,8 +89,20 @@ func (pi *PackageInstaller) Run(ctx context.Context, kube client.Client) error { } cMap[xpkg.ParsePackageSourceFromReference(ref)] = c.GetName() } + fl := &v1.FunctionList{} + if err := kube.List(ctx, fl); err != nil && !kerrors.IsNotFound(err) { + return errors.Wrap(err, errListFunctions) + } + fMap := make(map[string]string, len(fl.Items)) + for _, f := range fl.Items { + ref, err := name.ParseReference(f.GetSource(), name.WithDefaultRegistry("")) + if err != nil { + continue + } + fMap[xpkg.ParsePackageSourceFromReference(ref)] = f.GetName() + } // NOTE(hasheddan): we maintain a separate index from the range so that - // Providers and Configurations can be added to the same slice for applying. + // Providers, Configurations and Functions can be added to the same slice for applying. 
pkgsIdx := 0 for _, img := range pi.providers { p := &v1.Provider{} @@ -105,6 +120,14 @@ func (pi *PackageInstaller) Run(ctx context.Context, kube client.Client) error { pkgs[pkgsIdx] = c pkgsIdx++ } + for _, img := range pi.functions { + f := &v1.Function{} + if err := buildPack(f, img, fMap); err != nil { + return err + } + pkgs[pkgsIdx] = f + pkgsIdx++ + } pa := resource.NewAPIPatchingApplicator(kube) for _, p := range pkgs { if err := pa.Apply(ctx, p); err != nil { diff --git a/internal/initializer/installer_test.go b/internal/initializer/installer_test.go index 6a4ffd62b..143df84c6 100644 --- a/internal/initializer/installer_test.go +++ b/internal/initializer/installer_test.go @@ -40,6 +40,9 @@ const ( errFmtGetConfiguration = "unexpected name in configuration get: %s" errFmtPatchConfiguration = "unexpected name in configuration update: %s" errFmtPatchConfigurationSource = "unexpected source in configuration update: %s" + errFmtGetFunction = "unexpected name in function get: %s" + errFmtPatchFunction = "unexpected name in function update: %s" + errFmtPatchFunctionSource = "unexpected source in function update: %s" ) var errBoom = errors.New("boom") @@ -53,9 +56,15 @@ func TestInstaller(t *testing.T) { c1 := "crossplane/getting-started-aws:v0.0.1" c1Repo := "crossplane/getting-started-aws" c1Name := "crossplane-getting-started-aws" + f1Existing := "existing-function" + f1 := "crossplane/function-auto-ready:v0.0.1" + f1Repo := "crossplane/function-auto-ready" + f1Name := "crossplane-function-auto-ready" + type args struct { p []string c []string + f []string kube client.Client } type want struct { @@ -69,6 +78,7 @@ func TestInstaller(t *testing.T) { args: args{ p: []string{p1}, c: []string{c1}, + f: []string{f1}, kube: &test.MockClient{ MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { switch l := list.(type) { @@ -102,6 +112,21 @@ func TestInstaller(t *testing.T) { }, }, } + case *v1.FunctionList: + *l = v1.FunctionList{ 
+ Items: []v1.Function{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: f1Name, + }, + Spec: v1.FunctionSpec{ + PackageSpec: v1.PackageSpec{ + Package: f1, + }, + }, + }, + }, + } default: t.Errorf("unexpected type") } @@ -117,6 +142,10 @@ func TestInstaller(t *testing.T) { if key.Name != c1Name { t.Errorf(errFmtGetConfiguration, key.Name) } + case *v1.Function: + if key.Name != f1Name { + t.Errorf(errFmtGetFunction, key.Name) + } default: t.Errorf("unexpected type") } @@ -132,6 +161,10 @@ func TestInstaller(t *testing.T) { if obj.GetName() != c1Name { t.Errorf(errFmtPatchConfiguration, obj.GetName()) } + case *v1.Function: + if obj.GetName() != f1Name { + t.Errorf(errFmtPatchFunction, obj.GetName()) + } default: t.Errorf("unexpected type") } @@ -144,6 +177,7 @@ func TestInstaller(t *testing.T) { args: args{ p: []string{p1}, c: []string{c1}, + f: []string{f1}, kube: &test.MockClient{ MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { switch l := list.(type) { @@ -177,6 +211,21 @@ func TestInstaller(t *testing.T) { }, }, } + case *v1.FunctionList: + *l = v1.FunctionList{ + Items: []v1.Function{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: f1Existing, + }, + Spec: v1.FunctionSpec{ + PackageSpec: v1.PackageSpec{ + Package: fmt.Sprintf("%s:%s", f1Repo, "v100.100.100"), + }, + }, + }, + }, + } default: t.Errorf("unexpected type") } @@ -192,6 +241,10 @@ func TestInstaller(t *testing.T) { if key.Name != c1Existing { t.Errorf(errFmtGetConfiguration, key.Name) } + case *v1.Function: + if key.Name != f1Existing { + t.Errorf(errFmtGetFunction, key.Name) + } default: t.Errorf("unexpected type") } @@ -213,6 +266,13 @@ func TestInstaller(t *testing.T) { if o.GetSource() != c1 { t.Errorf(errFmtPatchConfigurationSource, o.GetSource()) } + case *v1.Function: + if o.GetName() != f1Existing { + t.Errorf(errFmtPatchFunction, o.GetName()) + } + if o.GetSource() != f1 { + t.Errorf(errFmtPatchFunctionSource, o.GetSource()) + } default: 
t.Errorf("unexpected type") } @@ -225,8 +285,9 @@ func TestInstaller(t *testing.T) { args: args{ p: []string{p1}, c: []string{c1}, + f: []string{f1}, kube: &test.MockClient{ - MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { + MockList: func(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error { return nil }, MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { @@ -239,12 +300,16 @@ func TestInstaller(t *testing.T) { if key.Name != c1Name { t.Errorf(errFmtGetConfiguration, key.Name) } + case *v1.Function: + if key.Name != f1Name { + t.Errorf(errFmtGetFunction, key.Name) + } default: t.Errorf("unexpected type") } return kerrors.NewNotFound(schema.GroupResource{}, key.Name) }, - MockCreate: func(_ context.Context, obj client.Object, _ ...client.CreateOption) error { + MockCreate: func(_ context.Context, _ client.Object, _ ...client.CreateOption) error { return nil }, }, @@ -254,6 +319,7 @@ func TestInstaller(t *testing.T) { args: args{ p: []string{p1}, c: []string{c1}, + f: []string{f1}, kube: &test.MockClient{ MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { switch l := list.(type) { @@ -284,6 +350,31 @@ func TestInstaller(t *testing.T) { } case *v1.ConfigurationList: return nil + case *v1.FunctionList: + *l = v1.FunctionList{ + Items: []v1.Function{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "other-function", + }, + Spec: v1.FunctionSpec{ + PackageSpec: v1.PackageSpec{ + Package: fmt.Sprintf("%s:%s", "other-repo", "v100.100.100"), + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "another-function", + }, + Spec: v1.FunctionSpec{ + PackageSpec: v1.PackageSpec{ + Package: "preloaded-source", + }, + }, + }, + }, + } default: t.Errorf("unexpected type") } @@ -299,12 +390,16 @@ func TestInstaller(t *testing.T) { if key.Name != c1Name { t.Errorf(errFmtGetConfiguration, key.Name) } + case *v1.Function: + if key.Name != f1Name { + 
t.Errorf(errFmtGetFunction, key.Name) + } default: t.Errorf("unexpected type") } return kerrors.NewNotFound(schema.GroupResource{}, key.Name) }, - MockCreate: func(_ context.Context, obj client.Object, _ ...client.CreateOption) error { + MockCreate: func(_ context.Context, _ client.Object, _ ...client.CreateOption) error { return nil }, }, @@ -316,7 +411,7 @@ func TestInstaller(t *testing.T) { args: args{ c: []string{c1}, kube: &test.MockClient{ - MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { + MockList: func(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error { return nil }, MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { @@ -327,12 +422,14 @@ func TestInstaller(t *testing.T) { if key.Name != c1Name { t.Errorf("unexpected name in configuration apply") } + case *v1.Function: + t.Errorf("no functions specified") default: t.Errorf("unexpected type") } return kerrors.NewNotFound(schema.GroupResource{}, key.Name) }, - MockCreate: func(_ context.Context, obj client.Object, _ ...client.CreateOption) error { + MockCreate: func(_ context.Context, _ client.Object, _ ...client.CreateOption) error { return nil }, }, @@ -342,11 +439,12 @@ func TestInstaller(t *testing.T) { args: args{ p: []string{p1}, c: []string{c1}, + f: []string{f1}, kube: &test.MockClient{ - MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { + MockList: func(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error { return nil }, - MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { return errBoom }, }, @@ -358,7 +456,7 @@ func TestInstaller(t *testing.T) { } for name, tc := range cases { t.Run(name, func(t *testing.T) { - i := NewPackageInstaller(tc.args.p, tc.args.c) + i := NewPackageInstaller(tc.args.p, tc.args.c, tc.args.f) err := 
i.Run(context.TODO(), tc.args.kube) if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" { t.Errorf("\n%s\nRun(...): -want err, +got err:\n%s", name, diff) diff --git a/internal/initializer/lock_test.go b/internal/initializer/lock_test.go index 7c36f775c..f34823b73 100644 --- a/internal/initializer/lock_test.go +++ b/internal/initializer/lock_test.go @@ -53,7 +53,7 @@ func TestLockObject(t *testing.T) { "FailApply": { args: args{ kube: &test.MockClient{ - MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { return errBoom }, }, diff --git a/internal/initializer/store_config_test.go b/internal/initializer/store_config_test.go index 19ddcd583..5afe5335f 100644 --- a/internal/initializer/store_config_test.go +++ b/internal/initializer/store_config_test.go @@ -43,7 +43,7 @@ func TestStoreConfigObject(t *testing.T) { "FailedToCreate": { args: args{ kube: &test.MockClient{ - MockCreate: func(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + MockCreate: func(_ context.Context, _ client.Object, _ ...client.CreateOption) error { return errBoom }, }, @@ -55,7 +55,7 @@ func TestStoreConfigObject(t *testing.T) { "SuccessCreated": { args: args{ kube: &test.MockClient{ - MockCreate: func(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + MockCreate: func(_ context.Context, _ client.Object, _ ...client.CreateOption) error { return nil }, }, @@ -64,7 +64,7 @@ func TestStoreConfigObject(t *testing.T) { "SuccessAlreadyExists": { args: args{ kube: &test.MockClient{ - MockCreate: func(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + MockCreate: func(_ context.Context, _ client.Object, _ ...client.CreateOption) error { return kerrors.NewAlreadyExists(schema.GroupResource{}, "default") }, }, diff --git a/internal/initializer/tls.go b/internal/initializer/tls.go index 
bd3a36e85..24c387dd5 100644 --- a/internal/initializer/tls.go +++ b/internal/initializer/tls.go @@ -26,7 +26,6 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/crossplane/crossplane-runtime/pkg/errors" @@ -50,16 +49,16 @@ const ( const ( // RootCACertSecretName is the name of the secret that will store CA certificates and rest of the - // certificates created per entities will be signed by this CA + // certificates created per entities will be signed by this CA. RootCACertSecretName = "crossplane-root-ca" - // SecretKeyCACert is the secret key of CA certificate + // SecretKeyCACert is the secret key of CA certificate. SecretKeyCACert = "ca.crt" ) // TLSCertificateGenerator is an initializer step that will find the given secret // and fill its tls.crt, tls.key and ca.crt fields to be used for External Secret -// Store plugins +// Store plugins. type TLSCertificateGenerator struct { namespace string caSecretName string @@ -75,14 +74,14 @@ type TLSCertificateGenerator struct { // TLSCertificateGeneratorOption is used to configure TLSCertificateGenerator behavior. type TLSCertificateGeneratorOption func(*TLSCertificateGenerator) -// TLSCertificateGeneratorWithLogger returns an TLSCertificateGeneratorOption that configures logger +// TLSCertificateGeneratorWithLogger returns an TLSCertificateGeneratorOption that configures logger. func TLSCertificateGeneratorWithLogger(log logging.Logger) TLSCertificateGeneratorOption { return func(g *TLSCertificateGenerator) { g.log = log } } -// TLSCertificateGeneratorWithOwner returns an TLSCertificateGeneratorOption that sets owner reference +// TLSCertificateGeneratorWithOwner returns an TLSCertificateGeneratorOption that sets owner reference. 
func TLSCertificateGeneratorWithOwner(owner []metav1.OwnerReference) TLSCertificateGeneratorOption { return func(g *TLSCertificateGenerator) { g.owner = owner @@ -128,7 +127,9 @@ func (e *TLSCertificateGenerator) loadOrGenerateCA(ctx context.Context, kube cli return nil, errors.Wrapf(err, errFmtGetTLSSecret, nn.Name) } + create := true if err == nil { + create = false kd := caSecret.Data[corev1.TLSPrivateKeyKey] cd := caSecret.Data[corev1.TLSCertKey] if len(kd) != 0 && len(cd) != 0 { @@ -157,13 +158,15 @@ func (e *TLSCertificateGenerator) loadOrGenerateCA(ctx context.Context, kube cli caSecret.Name = nn.Name caSecret.Namespace = nn.Namespace - _, err = controllerruntime.CreateOrUpdate(ctx, kube, caSecret, func() error { - caSecret.Data = map[string][]byte{ - corev1.TLSCertKey: caCrtByte, - corev1.TLSPrivateKeyKey: caKeyByte, - } - return nil - }) + caSecret.Data = map[string][]byte{ + corev1.TLSCertKey: caCrtByte, + corev1.TLSPrivateKeyKey: caKeyByte, + } + if create { + err = kube.Create(ctx, caSecret) + } else { + err = kube.Update(ctx, caSecret) + } if err != nil { return nil, errors.Wrapf(err, errFmtCannotCreateOrUpdate, nn.Name) } @@ -179,7 +182,9 @@ func (e *TLSCertificateGenerator) ensureClientCertificate(ctx context.Context, k return errors.Wrapf(err, errFmtGetTLSSecret, nn.Name) } + create := true if err == nil { + create = false if len(sec.Data[corev1.TLSPrivateKeyKey]) != 0 || len(sec.Data[corev1.TLSCertKey]) != 0 || len(sec.Data[SecretKeyCACert]) != 0 { e.log.Info("TLS secret contains client certificate.", "secret", nn.Name) return nil @@ -212,17 +217,18 @@ func (e *TLSCertificateGenerator) ensureClientCertificate(ctx context.Context, k if e.owner != nil { sec.OwnerReferences = e.owner } - _, err = controllerruntime.CreateOrUpdate(ctx, kube, sec, func() error { - if sec.Data == nil { - sec.Data = make(map[string][]byte) - } - sec.Data[corev1.TLSCertKey] = certData - sec.Data[corev1.TLSPrivateKeyKey] = keyData - sec.Data[SecretKeyCACert] = 
signer.certificatePEM - - return nil - }) + if sec.Data == nil { + sec.Data = make(map[string][]byte) + } + sec.Data[corev1.TLSCertKey] = certData + sec.Data[corev1.TLSPrivateKeyKey] = keyData + sec.Data[SecretKeyCACert] = signer.certificatePEM + if create { + err = kube.Create(ctx, sec) + } else { + err = kube.Update(ctx, sec) + } return errors.Wrapf(err, errFmtCannotCreateOrUpdate, nn.Name) } @@ -234,7 +240,9 @@ func (e *TLSCertificateGenerator) ensureServerCertificate(ctx context.Context, k return errors.Wrapf(err, errFmtGetTLSSecret, nn.Name) } + create := true if err == nil { + create = false if len(sec.Data[corev1.TLSCertKey]) != 0 || len(sec.Data[corev1.TLSPrivateKeyKey]) != 0 || len(sec.Data[SecretKeyCACert]) != 0 { e.log.Info("TLS secret contains server certificate.", "secret", nn.Name) return nil @@ -268,17 +276,18 @@ func (e *TLSCertificateGenerator) ensureServerCertificate(ctx context.Context, k if e.owner != nil { sec.OwnerReferences = e.owner } - _, err = controllerruntime.CreateOrUpdate(ctx, kube, sec, func() error { - if sec.Data == nil { - sec.Data = make(map[string][]byte) - } - sec.Data[corev1.TLSCertKey] = certData - sec.Data[corev1.TLSPrivateKeyKey] = keyData - sec.Data[SecretKeyCACert] = signer.certificatePEM - - return nil - }) + if sec.Data == nil { + sec.Data = make(map[string][]byte) + } + sec.Data[corev1.TLSCertKey] = certData + sec.Data[corev1.TLSPrivateKeyKey] = keyData + sec.Data[SecretKeyCACert] = signer.certificatePEM + if create { + err = kube.Create(ctx, sec) + } else { + err = kube.Update(ctx, sec) + } return errors.Wrapf(err, errFmtCannotCreateOrUpdate, nn.Name) } diff --git a/internal/initializer/tls_test.go b/internal/initializer/tls_test.go index 7fd3621e3..126cbad60 100644 --- a/internal/initializer/tls_test.go +++ b/internal/initializer/tls_test.go @@ -108,7 +108,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { reason: "It should return error if the CA secret cannot be retrieved.", args: args{ kube: &test.MockClient{ 
- MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, _ client.Object) error { if key.Name != caCertSecretName || key.Namespace != secretNS { return errors.New("unexpected secret name or namespace") } @@ -129,7 +129,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { reason: "It should return error if the CA secret cannot be updated.", args: args{ kube: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { if key.Name != caCertSecretName || key.Namespace != secretNS { return errors.New("unexpected secret name or namespace") } @@ -161,7 +161,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { reason: "It should return no error after loading the CA from the Secret.", args: args{ kube: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { if key.Name != caCertSecretName { return nil } @@ -177,7 +177,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { MockUpdate: test.NewMockUpdateFn(nil), }, certificate: &MockCertificateGenerator{ - MockGenerate: func(cert *x509.Certificate, signer *CertificateSigner) ([]byte, []byte, error) { + MockGenerate: func(_ *x509.Certificate, _ *CertificateSigner) ([]byte, []byte, error) { return []byte("test-key"), []byte("test-cert"), nil }, }, @@ -191,7 +191,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { reason: "It should return error if the CA secret cannot be parsed.", args: args{ kube: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { if key.Name != caCertSecretName || key.Namespace != secretNS { return errors.New("unexpected 
secret name or namespace") } @@ -228,7 +228,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { reason: "It should return error if the server secret cannot be retrieved.", args: args{ kube: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { if key.Name == caCertSecretName && key.Namespace == secretNS { s := &corev1.Secret{ Data: map[string][]byte{ @@ -260,7 +260,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { reason: "It should return error if the client secret cannot be retrieved.", args: args{ kube: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { if key.Name == caCertSecretName && key.Namespace == secretNS { s := &corev1.Secret{ Data: map[string][]byte{ @@ -301,7 +301,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { reason: "It should be successful if the CA and TLS certificates are generated and put into the Secret.", args: args{ kube: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { if key.Name == caCertSecretName && key.Namespace == secretNS { s := &corev1.Secret{ Data: map[string][]byte{ @@ -337,7 +337,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { return errors.New("unexpected secret name or namespace") }, - MockUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + MockUpdate: func(_ context.Context, obj client.Object, _ ...client.UpdateOption) error { if obj.GetName() == tlsServerSecretName && obj.GetNamespace() == secretNS { s := &corev1.Secret{ Data: map[string][]byte{ @@ -366,7 +366,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { }, }, certificate: 
&MockCertificateGenerator{ - MockGenerate: func(cert *x509.Certificate, signer *CertificateSigner) ([]byte, []byte, error) { + MockGenerate: func(_ *x509.Certificate, _ *CertificateSigner) ([]byte, []byte, error) { return []byte(caKey), []byte(caCert), nil }, }, @@ -380,7 +380,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { reason: "It should be successful if the CA and TLS certificates are already in the Secret.", args: args{ kube: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { if key.Name == caCertSecretName && key.Namespace == secretNS { s := &corev1.Secret{ Data: map[string][]byte{ @@ -428,7 +428,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { MockGet: test.NewMockGetFn(nil), }, certificate: &MockCertificateGenerator{ - MockGenerate: func(cert *x509.Certificate, signer *CertificateSigner) ([]byte, []byte, error) { + MockGenerate: func(_ *x509.Certificate, _ *CertificateSigner) ([]byte, []byte, error) { return nil, nil, errBoom }, }, @@ -445,7 +445,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { reason: "It should return error if the CA and TLS certificates cannot be generated.", args: args{ kube: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { if key.Name == caCertSecretName && key.Namespace == secretNS { s := &corev1.Secret{ Data: map[string][]byte{ @@ -460,7 +460,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { }, }, certificate: &MockCertificateGenerator{ - MockGenerate: func(cert *x509.Certificate, signer *CertificateSigner) ([]byte, []byte, error) { + MockGenerate: func(_ *x509.Certificate, _ *CertificateSigner) ([]byte, []byte, error) { return nil, nil, errBoom }, }, @@ -479,7 +479,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { reason: 
"It should return error if the CA secret cannot be retrieved.", args: args{ kube: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, _ client.Object) error { if key.Name != caCertSecretName || key.Namespace != secretNS { return errors.New("unexpected secret name or namespace") } @@ -499,7 +499,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { reason: "It should return error if the server secret cannot be retrieved.", args: args{ kube: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { if key.Name == caCertSecretName && key.Namespace == secretNS { s := &corev1.Secret{ Data: map[string][]byte{ @@ -530,7 +530,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { reason: "It should be successful if the server certificates are already in the Secret.", args: args{ kube: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { if key.Name == caCertSecretName && key.Namespace == secretNS { s := &corev1.Secret{ Data: map[string][]byte{ @@ -565,9 +565,8 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { "OnlyServerCertificateSuccessfulGeneratedServerCert": { reason: "It should be successful if the server certificate is generated and put into the Secret.", args: args{ - kube: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { if key.Name == caCertSecretName && key.Namespace == secretNS { s := &corev1.Secret{ Data: map[string][]byte{ @@ -592,7 +591,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { return errors.New("unexpected secret name or 
namespace") }, - MockUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + MockUpdate: func(_ context.Context, obj client.Object, _ ...client.UpdateOption) error { if obj.GetName() == tlsServerSecretName && obj.GetNamespace() == secretNS { s := &corev1.Secret{ Data: map[string][]byte{ @@ -609,7 +608,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { }, certificate: &MockCertificateGenerator{ - MockGenerate: func(cert *x509.Certificate, signer *CertificateSigner) ([]byte, []byte, error) { + MockGenerate: func(_ *x509.Certificate, _ *CertificateSigner) ([]byte, []byte, error) { return []byte(caKey), []byte(caCert), nil }, }, @@ -623,7 +622,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { reason: "It should return error if the CA secret cannot be retrieved.", args: args{ kube: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, _ client.Object) error { if key.Name != caCertSecretName || key.Namespace != secretNS { return errors.New("unexpected secret name or namespace") } @@ -643,7 +642,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { reason: "It should return error if the client secret cannot be retrieved.", args: args{ kube: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { if key.Name == caCertSecretName && key.Namespace == secretNS { s := &corev1.Secret{ Data: map[string][]byte{ @@ -674,7 +673,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { reason: "It should be successful if the client certificates are already in the Secret.", args: args{ kube: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { if key.Name == 
caCertSecretName && key.Namespace == secretNS { s := &corev1.Secret{ Data: map[string][]byte{ @@ -709,9 +708,8 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { "OnlyClientCertificateSuccessfulGeneratedClientCert": { reason: "It should be successful if the client certificate is generated and put into the Secret.", args: args{ - kube: &test.MockClient{ - MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { if key.Name == caCertSecretName && key.Namespace == secretNS { s := &corev1.Secret{ Data: map[string][]byte{ @@ -736,7 +734,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { return errors.New("unexpected secret name or namespace") }, - MockUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + MockUpdate: func(_ context.Context, obj client.Object, _ ...client.UpdateOption) error { if obj.GetName() == tlsClientSecretName && obj.GetNamespace() == secretNS { s := &corev1.Secret{ Data: map[string][]byte{ @@ -753,7 +751,7 @@ func TestTLSCertificateGeneratorRun(t *testing.T) { }, certificate: &MockCertificateGenerator{ - MockGenerate: func(cert *x509.Certificate, signer *CertificateSigner) ([]byte, []byte, error) { + MockGenerate: func(_ *x509.Certificate, _ *CertificateSigner) ([]byte, []byte, error) { return []byte(caKey), []byte(caCert), nil }, }, diff --git a/internal/initializer/waiter_test.go b/internal/initializer/waiter_test.go index c4d2d215e..b79fb2ab6 100644 --- a/internal/initializer/waiter_test.go +++ b/internal/initializer/waiter_test.go @@ -51,7 +51,7 @@ func TestCRDWaiter(t *testing.T) { period: 1 * time.Second, timeout: 2 * time.Second, kube: &test.MockClient{ - MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { return nil }, }, @@ -63,7 +63,7 @@ func TestCRDWaiter(t *testing.T) { 
timeout: 2 * time.Millisecond, period: 1 * time.Millisecond, kube: &test.MockClient{ - MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, key client.ObjectKey, _ client.Object) error { return kerrors.NewNotFound(schema.GroupResource{}, key.Name) }, }, @@ -78,7 +78,7 @@ func TestCRDWaiter(t *testing.T) { period: 1 * time.Millisecond, timeout: 1 * time.Second, kube: &test.MockClient{ - MockGet: func(_ context.Context, key client.ObjectKey, obj client.Object) error { + MockGet: func(_ context.Context, _ client.ObjectKey, _ client.Object) error { return errBoom }, }, diff --git a/internal/initializer/webhook_configurations.go b/internal/initializer/webhook_configurations.go index e6d8e579f..d126f2e01 100644 --- a/internal/initializer/webhook_configurations.go +++ b/internal/initializer/webhook_configurations.go @@ -75,7 +75,7 @@ type WebhookConfigurations struct { // Run applies all webhook ValidatingWebhookConfigurations and // MutatingWebhookConfiguration in the given directory. -func (c *WebhookConfigurations) Run(ctx context.Context, kube client.Client) error { //nolint:gocyclo // Only slightly over (11). +func (c *WebhookConfigurations) Run(ctx context.Context, kube client.Client) error { s := &corev1.Secret{} if err := kube.Get(ctx, c.TLSSecretRef, s); err != nil { return errors.Wrap(err, errGetWebhookSecret) @@ -132,7 +132,7 @@ func (c *WebhookConfigurations) Run(ctx context.Context, kube client.Client) err default: return errors.Errorf("only MutatingWebhookConfiguration and ValidatingWebhookConfiguration kinds are accepted, got %T", obj) } - if err := pa.Apply(ctx, obj.(client.Object)); err != nil { + if err := pa.Apply(ctx, obj.(client.Object)); err != nil { //nolint:forcetypeassert // Should always be a client.Object. 
return errors.Wrap(err, errApplyWebhookConfiguration) } } diff --git a/internal/metrics/metrics.go b/internal/metrics/metrics.go index 24ff7e8fa..35a49d7c3 100644 --- a/internal/metrics/metrics.go +++ b/internal/metrics/metrics.go @@ -18,7 +18,11 @@ package metrics import "sigs.k8s.io/controller-runtime/pkg/metrics" +// TODO(negz): Should we try to plumb the metrics registry down to all callers? +// I think this would be a good practice - similar to how we plumb the logger. +// On the other hand, using a global metrics registry is idiomatic for Prom. + // Registry is a Prometheus metrics registry. All Crossplane metrics should be // registered with it. Crossplane adds metrics to the registry created and // served by controller-runtime. -var Registry = metrics.Registry +var Registry = metrics.Registry //nolint:gochecknoglobals // See TODO above. diff --git a/internal/names/generate.go b/internal/names/generate.go index 721511ff9..33cf591ab 100644 --- a/internal/names/generate.go +++ b/internal/names/generate.go @@ -84,7 +84,7 @@ func (r *nameGenerator) GenerateName(ctx context.Context, cd resource.Object) er // locally. To reduce that risk even further the caller must employ a // conflict recovery mechanism. 
maxTries := 10 - for i := 0; i < maxTries; i++ { + for range maxTries { name := r.namer.GenerateName(cd.GetGenerateName()) obj := composite.Unstructured{} obj.SetGroupVersionKind(cd.GetObjectKind().GroupVersionKind()) diff --git a/internal/names/generate_test.go b/internal/names/generate_test.go index 728ac3e7d..a0bc585cf 100644 --- a/internal/names/generate_test.go +++ b/internal/names/generate_test.go @@ -110,7 +110,7 @@ func TestGenerateName(t *testing.T) { }, "SuccessAfterConflict": { reason: "Name is found on second try", - client: &test.MockClient{MockGet: func(ctx context.Context, key client.ObjectKey, obj client.Object) error { + client: &test.MockClient{MockGet: func(_ context.Context, key client.ObjectKey, _ client.Object) error { if key.Name == "cool-resource-42" { return nil } diff --git a/internal/transport/transport.go b/internal/transport/transport.go index 32d98198d..f95d296be 100644 --- a/internal/transport/transport.go +++ b/internal/transport/transport.go @@ -26,7 +26,9 @@ import ( // DefaultUserAgent is the default User-Agent header that is set when making // HTTP requests for packages. -var DefaultUserAgent = fmt.Sprintf("%s/%s", "crossplane", version.New().GetVersionString()) +func DefaultUserAgent() string { + return fmt.Sprintf("%s/%s", "crossplane", version.New().GetVersionString()) +} // UserAgent wraps a RoundTripper and injects a user agent header. 
type UserAgent struct { diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go index 4b1334944..da7c029ac 100644 --- a/internal/transport/transport_test.go +++ b/internal/transport/transport_test.go @@ -55,8 +55,10 @@ func userAgentValidator(userAgent string) requestValidationFn { } } -var _ http.RoundTripper = &UserAgent{} -var _ http.RoundTripper = &validatingRoundTripper{} +var ( + _ http.RoundTripper = &UserAgent{} + _ http.RoundTripper = &validatingRoundTripper{} +) func TestUserAgent(t *testing.T) { cases := map[string]struct { diff --git a/internal/usage/handler.go b/internal/usage/handler.go index 2865c4621..cbc84f9ca 100644 --- a/internal/usage/handler.go +++ b/internal/usage/handler.go @@ -30,10 +30,12 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + "sigs.k8s.io/yaml" "github.com/crossplane/crossplane-runtime/pkg/controller" "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane-runtime/pkg/logging" + xpmeta "github.com/crossplane/crossplane-runtime/pkg/meta" xpunstructured "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured" "github.com/crossplane/crossplane/apis/apiextensions/v1alpha1" @@ -41,9 +43,14 @@ import ( const ( // InUseIndexKey used to index CRDs by "Kind" and "group", to be used when - // indexing and retrieving needed CRDs + // indexing and retrieving needed CRDs. InUseIndexKey = "inuse.apiversion.kind.name" + // AnnotationKeyDeletionAttempt is the annotation key used to record whether + // a deletion attempt was made and blocked by the Usage. The value stored is + // the propagation policy used with the deletion attempt. + AnnotationKeyDeletionAttempt = "usage.crossplane.io/deletion-attempt-with-policy" + // Error strings. 
errFmtUnexpectedOp = "unexpected operation %q, expected \"DELETE\"" ) @@ -68,7 +75,7 @@ func indexValue(apiVersion, kind, name string) string { func SetupWebhookWithManager(mgr ctrl.Manager, options controller.Options) error { indexer := mgr.GetFieldIndexer() if err := indexer.IndexField(context.Background(), &v1alpha1.Usage{}, InUseIndexKey, func(obj client.Object) []string { - u := obj.(*v1alpha1.Usage) + u := obj.(*v1alpha1.Usage) //nolint:forcetypeassert // Will always be a Usage. if u.Spec.Of.ResourceRef == nil || len(u.Spec.Of.ResourceRef.Name) == 0 { return []string{} } @@ -87,7 +94,7 @@ func SetupWebhookWithManager(mgr ctrl.Manager, options controller.Options) error // Handler implements the admission Handler for Composition. type Handler struct { - reader client.Reader + client client.Client log logging.Logger } @@ -102,9 +109,9 @@ func WithLogger(l logging.Logger) HandlerOption { } // NewHandler returns a new Handler. -func NewHandler(reader client.Reader, opts ...HandlerOption) *Handler { +func NewHandler(client client.Client, opts ...HandlerOption) *Handler { h := &Handler{ - reader: reader, + client: client, log: logging.NewNopLogger(), } @@ -126,22 +133,44 @@ func (h *Handler) Handle(ctx context.Context, request admission.Request) admissi if err := u.UnmarshalJSON(request.OldObject.Raw); err != nil { return admission.Errored(http.StatusBadRequest, err) } - return h.validateNoUsages(ctx, u) + opts := &metav1.DeleteOptions{} + if err := yaml.Unmarshal(request.Options.Raw, opts); err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + return h.validateNoUsages(ctx, u, opts) default: return admission.Errored(http.StatusBadRequest, errors.Errorf(errFmtUnexpectedOp, request.Operation)) } } -func (h *Handler) validateNoUsages(ctx context.Context, u *unstructured.Unstructured) admission.Response { - h.log.Debug("Validating no usages", "apiVersion", u.GetAPIVersion(), "kind", u.GetKind(), "name", u.GetName()) +func (h *Handler) 
validateNoUsages(ctx context.Context, u *unstructured.Unstructured, opts *metav1.DeleteOptions) admission.Response { + h.log.Debug("Validating no usages", "apiVersion", u.GetAPIVersion(), "kind", u.GetKind(), "name", u.GetName(), "policy", opts.PropagationPolicy) usageList := &v1alpha1.UsageList{} - if err := h.reader.List(ctx, usageList, client.MatchingFields{InUseIndexKey: IndexValueForObject(u)}); err != nil { + if err := h.client.List(ctx, usageList, client.MatchingFields{InUseIndexKey: IndexValueForObject(u)}); err != nil { h.log.Debug("Error when getting Usages", "apiVersion", u.GetAPIVersion(), "kind", u.GetKind(), "name", u.GetName(), "err", err) return admission.Errored(http.StatusInternalServerError, err) } if len(usageList.Items) > 0 { msg := inUseMessage(usageList) h.log.Debug("Usage found, deletion not allowed", "apiVersion", u.GetAPIVersion(), "kind", u.GetKind(), "name", u.GetName(), "msg", msg) + + // Use the default propagation policy if not provided + policy := metav1.DeletePropagationBackground + if opts.PropagationPolicy != nil { + policy = *opts.PropagationPolicy + } + // If the resource is being deleted, we want to record the first deletion attempt + // so that we can track whether a deletion was attempted at least once. 
+ if u.GetAnnotations() == nil || u.GetAnnotations()[AnnotationKeyDeletionAttempt] != string(policy) { + orig := u.DeepCopy() + xpmeta.AddAnnotations(u, map[string]string{AnnotationKeyDeletionAttempt: string(policy)}) + // Patch the resource to add the deletion attempt annotation + if err := h.client.Patch(ctx, u, client.MergeFrom(orig)); err != nil { + h.log.Debug("Error when patching the resource to add the deletion attempt annotation", "apiVersion", u.GetAPIVersion(), "kind", u.GetKind(), "name", u.GetName(), "err", err) + return admission.Errored(http.StatusInternalServerError, err) + } + } + return admission.Response{ AdmissionResponse: admissionv1.AdmissionResponse{ Allowed: false, diff --git a/internal/usage/handler_test.go b/internal/usage/handler_test.go index 0b41ba8e1..734f3da30 100644 --- a/internal/usage/handler_test.go +++ b/internal/usage/handler_test.go @@ -42,7 +42,7 @@ var errBoom = errors.New("boom") func TestHandle(t *testing.T) { protected := "This resource is protected!" 
type args struct { - reader client.Reader + client client.Client request admission.Request } type want struct { @@ -121,8 +121,8 @@ func TestHandle(t *testing.T) { "DeleteAllowedNoUsages": { reason: "We should allow a delete request if there is no usages for the given object.", args: args{ - reader: &test.MockClient{ - MockList: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + client: &test.MockClient{ + MockList: func(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error { return nil }, }, @@ -147,8 +147,8 @@ func TestHandle(t *testing.T) { "DeleteRejectedCannotList": { reason: "We should reject a delete request if we cannot list usages.", args: args{ - reader: &test.MockClient{ - MockList: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + client: &test.MockClient{ + MockList: func(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error { return errBoom }, }, @@ -173,8 +173,11 @@ func TestHandle(t *testing.T) { "DeleteBlockedWithUsageBy": { reason: "We should reject a delete request if there are usages for the given object with \"by\" defined.", args: args{ - reader: &test.MockClient{ - MockList: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + client: &test.MockClient{ + MockPatch: func(_ context.Context, _ client.Object, _ client.Patch, _ ...client.PatchOption) error { + return nil + }, + MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { l := list.(*v1alpha1.UsageList) l.Items = []v1alpha1.Usage{ { @@ -231,8 +234,11 @@ func TestHandle(t *testing.T) { "DeleteBlockedWithUsageReason": { reason: "We should reject a delete request if there are usages for the given object with \"reason\" defined.", args: args{ - reader: &test.MockClient{ - MockList: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + client: &test.MockClient{ + MockPatch: func(_ 
context.Context, _ client.Object, _ client.Patch, _ ...client.PatchOption) error { + return nil + }, + MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { l := list.(*v1alpha1.UsageList) l.Items = []v1alpha1.Usage{ { @@ -283,8 +289,11 @@ func TestHandle(t *testing.T) { "DeleteBlockedWithUsageNone": { reason: "We should reject a delete request if there are usages for the given object without \"reason\" or \"by\" defined.", args: args{ - reader: &test.MockClient{ - MockList: func(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + client: &test.MockClient{ + MockPatch: func(_ context.Context, _ client.Object, _ client.Patch, _ ...client.PatchOption) error { + return nil + }, + MockList: func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { l := list.(*v1alpha1.UsageList) l.Items = []v1alpha1.Usage{ { @@ -334,7 +343,7 @@ func TestHandle(t *testing.T) { } for name, tc := range cases { t.Run(name, func(t *testing.T) { - h := NewHandler(tc.args.reader, WithLogger(logging.NewNopLogger())) + h := NewHandler(tc.args.client, WithLogger(logging.NewNopLogger())) got := h.Handle(context.Background(), tc.args.request) if diff := cmp.Diff(tc.want.resp, got); diff != "" { t.Errorf("%s\nHandle(...): -want response, +got:\n%s", tc.reason, diff) diff --git a/internal/validation/apiextensions/v1/composition/handler.go b/internal/validation/apiextensions/v1/composition/handler.go index c9750fee8..21dc9328c 100644 --- a/internal/validation/apiextensions/v1/composition/handler.go +++ b/internal/validation/apiextensions/v1/composition/handler.go @@ -60,7 +60,7 @@ func SetupWebhookWithManager(mgr ctrl.Manager, options controller.Options) error // The index is used by the getCRD function below. 
indexer := mgr.GetFieldIndexer() if err := indexer.IndexField(context.Background(), &extv1.CustomResourceDefinition{}, crdsIndexKey, func(obj client.Object) []string { - return []string{getIndexValueForCRD(obj.(*extv1.CustomResourceDefinition))} + return []string{getIndexValueForCRD(obj.(*extv1.CustomResourceDefinition))} //nolint:forcetypeassert // Will always be a CRD. }); err != nil { return err } @@ -79,7 +79,7 @@ type validator struct { } // ValidateCreate validates a Composition. -func (v *validator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { //nolint:gocyclo // Currently only at 11 +func (v *validator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { comp, ok := obj.(*v1.Composition) if !ok { return nil, errors.New(errNotComposition) @@ -189,7 +189,6 @@ func (v *validator) getNeededCRDs(ctx context.Context, comp *v1.Composition) (ma // Get schema for all Managed Resource Definitions defined by // comp.Spec.Resources. for _, res := range comp.Spec.Resources { - res := res gvk, err := composition.GetBaseObjectGVK(&res) if err != nil { return nil, []error{err} diff --git a/internal/validation/apiextensions/v1/xrd/handler.go b/internal/validation/apiextensions/v1/xrd/handler.go index 1f3243c90..ce58cacec 100644 --- a/internal/validation/apiextensions/v1/xrd/handler.go +++ b/internal/validation/apiextensions/v1/xrd/handler.go @@ -25,6 +25,7 @@ import ( apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/util/retry" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -104,23 +105,23 @@ func (v *validator) ValidateCreate(ctx context.Context, obj runtime.Object) (war } // ValidateUpdate implements the same logic as ValidateCreate. 
-func (v *validator) ValidateUpdate(ctx context.Context, old, new runtime.Object) (warns admission.Warnings, err error) { +func (v *validator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (warns admission.Warnings, err error) { // Validate the update - oldObj, ok := old.(*v1.CompositeResourceDefinition) + oldXRD, ok := oldObj.(*v1.CompositeResourceDefinition) if !ok { return nil, errors.New(errUnexpectedType) } - newObj, ok := new.(*v1.CompositeResourceDefinition) + newXRD, ok := newObj.(*v1.CompositeResourceDefinition) if !ok { return nil, errors.New(errUnexpectedType) } // Validate the update - validationWarns, validationErr := newObj.ValidateUpdate(oldObj) + validationWarns, validationErr := newXRD.ValidateUpdate(oldXRD) warns = append(warns, validationWarns...) if validationErr != nil { return validationWarns, validationErr.ToAggregate() } - crds, err := getAllCRDsForXRD(newObj) + crds, err := getAllCRDsForXRD(newXRD) if err != nil { return warns, xperrors.Wrap(err, "cannot get CRDs for CompositeResourceDefinition") } @@ -137,7 +138,7 @@ func (v *validator) ValidateUpdate(ctx context.Context, old, new runtime.Object) // which previously did not specify a claim. 
err := v.dryRunUpdateOrCreateIfNotFound(ctx, crd) if err != nil { - return warns, v.rewriteError(err, newObj, crd) + return warns, v.rewriteError(err, newXRD, crd) } } @@ -145,16 +146,18 @@ func (v *validator) ValidateUpdate(ctx context.Context, old, new runtime.Object) } func (v *validator) dryRunUpdateOrCreateIfNotFound(ctx context.Context, crd *apiextv1.CustomResourceDefinition) error { - got := crd.DeepCopy() - err := v.client.Get(ctx, client.ObjectKey{Name: crd.Name}, got) - if err == nil { - got.Spec = crd.Spec - return v.client.Update(ctx, got, client.DryRunAll) - } - if kerrors.IsNotFound(err) { - return v.client.Create(ctx, crd, client.DryRunAll) - } - return err + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + got := crd.DeepCopy() + err := v.client.Get(ctx, client.ObjectKey{Name: crd.Name}, got) + if err == nil { + got.Spec = crd.Spec + return v.client.Update(ctx, got, client.DryRunAll) + } + if kerrors.IsNotFound(err) { + return v.client.Create(ctx, crd, client.DryRunAll) + } + return err + }) } // ValidateDelete always allows delete requests. diff --git a/internal/xcrd/crd.go b/internal/xcrd/crd.go index 085050083..e7336464e 100644 --- a/internal/xcrd/crd.go +++ b/internal/xcrd/crd.go @@ -188,6 +188,7 @@ func genCrdVersion(vr v1.CompositeResourceDefinitionVersion, maxNameLength int64 xSpec := s.Properties["spec"] cSpec := crdv.Schema.OpenAPIV3Schema.Properties["spec"] cSpec.Required = append(cSpec.Required, xSpec.Required...) + cSpec.XPreserveUnknownFields = xSpec.XPreserveUnknownFields cSpec.XValidations = append(cSpec.XValidations, xSpec.XValidations...) cSpec.OneOf = append(cSpec.OneOf, xSpec.OneOf...) 
cSpec.Description = xSpec.Description diff --git a/internal/xcrd/crd_test.go b/internal/xcrd/crd_test.go index 55db8df22..f14c9f0fe 100644 --- a/internal/xcrd/crd_test.go +++ b/internal/xcrd/crd_test.go @@ -466,6 +466,15 @@ func TestForCompositeResource(t *testing.T) { }, }, }, + "claimConditionTypes": { + Type: "array", + XListType: ptr.To("set"), + Items: &extv1.JSONSchemaPropsOrArray{ + Schema: &extv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, "connectionDetails": { Type: "object", Properties: map[string]extv1.JSONSchemaProps{ @@ -783,6 +792,15 @@ func TestForCompositeResource(t *testing.T) { }, }, }, + "claimConditionTypes": { + Type: "array", + XListType: ptr.To("set"), + Items: &extv1.JSONSchemaPropsOrArray{ + Schema: &extv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, "connectionDetails": { Type: "object", Properties: map[string]extv1.JSONSchemaProps{ @@ -1030,7 +1048,6 @@ func TestForCompositeResource(t *testing.T) { Type: "object", Description: "", Properties: map[string]extv1.JSONSchemaProps{ - // From CompositeResourceStatusProps() "conditions": { Description: "Conditions of the resource.", @@ -1051,6 +1068,15 @@ func TestForCompositeResource(t *testing.T) { }, }, }, + "claimConditionTypes": { + Type: "array", + XListType: ptr.To("set"), + Items: &extv1.JSONSchemaPropsOrArray{ + Schema: &extv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, "connectionDetails": { Type: "object", Properties: map[string]extv1.JSONSchemaProps{ @@ -1334,6 +1360,15 @@ func TestForCompositeResource(t *testing.T) { }, }, }, + "claimConditionTypes": { + Type: "array", + XListType: ptr.To("set"), + Items: &extv1.JSONSchemaPropsOrArray{ + Schema: &extv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, "connectionDetails": { Type: "object", Properties: map[string]extv1.JSONSchemaProps{ @@ -1627,6 +1662,15 @@ func TestForCompositeResource(t *testing.T) { }, }, }, + "claimConditionTypes": { + Type: "array", + XListType: ptr.To("set"), + Items: 
&extv1.JSONSchemaPropsOrArray{ + Schema: &extv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, "connectionDetails": { Type: "object", Properties: map[string]extv1.JSONSchemaProps{ @@ -1663,6 +1707,309 @@ func TestForCompositeResource(t *testing.T) { c: nil, }, }, + "PreserveUnknownFieldsInSpec": { + reason: "A CRD should set PreserveUnknownFields based on the XRD PreserveUnknownFields.", + args: args{ + v: &v1.CompositeResourceValidation{ + OpenAPIV3Schema: runtime.RawExtension{Raw: []byte(strings.Replace(schema, `"spec": {`, `"spec": { "x-kubernetes-preserve-unknown-fields": true,`, 1))}, + }, + }, + want: want{ + c: &extv1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: labels, + OwnerReferences: []metav1.OwnerReference{ + meta.AsController(meta.TypedReferenceTo(d, v1.CompositeResourceDefinitionGroupVersionKind)), + }, + }, + Spec: extv1.CustomResourceDefinitionSpec{ + Group: group, + Names: extv1.CustomResourceDefinitionNames{ + Plural: plural, + Singular: singular, + Kind: kind, + ListKind: listKind, + Categories: []string{CategoryComposite}, + }, + Scope: extv1.ClusterScoped, + Versions: []extv1.CustomResourceDefinitionVersion{{ + Name: version, + Served: true, + Storage: true, + Subresources: &extv1.CustomResourceSubresources{ + Status: &extv1.CustomResourceSubresourceStatus{}, + }, + AdditionalPrinterColumns: []extv1.CustomResourceColumnDefinition{ + { + Name: "SYNCED", + Type: "string", + JSONPath: ".status.conditions[?(@.type=='Synced')].status", + }, + { + Name: "READY", + Type: "string", + JSONPath: ".status.conditions[?(@.type=='Ready')].status", + }, + { + Name: "COMPOSITION", + Type: "string", + JSONPath: ".spec.compositionRef.name", + }, + { + Name: "AGE", + Type: "date", + JSONPath: ".metadata.creationTimestamp", + }, + }, + Schema: &extv1.CustomResourceValidation{ + OpenAPIV3Schema: &extv1.JSONSchemaProps{ + Type: "object", + Description: "What the resource is for.", + Required: []string{"spec"}, + Properties: 
map[string]extv1.JSONSchemaProps{ + "apiVersion": { + Type: "string", + }, + "kind": { + Type: "string", + }, + "metadata": { + // NOTE(muvaf): api-server takes care of validating + // metadata. + Type: "object", + Properties: map[string]extv1.JSONSchemaProps{ + "name": { + Type: "string", + MaxLength: ptr.To[int64](63), + }, + }, + }, + "spec": { + Type: "object", + Required: []string{"storageGB", "engineVersion"}, + Description: "Specification of the resource.", + XPreserveUnknownFields: ptr.To(true), + Properties: map[string]extv1.JSONSchemaProps{ + // From CRDSpecTemplate.Validation + "storageGB": {Type: "integer", Description: "Pretend this is useful."}, + "engineVersion": { + Type: "string", + Enum: []extv1.JSON{ + {Raw: []byte(`"5.6"`)}, + {Raw: []byte(`"5.7"`)}, + }, + }, + "someField": {Type: "string", Description: "Pretend this is useful."}, + "someOtherField": {Type: "string", Description: "Pretend this is useful."}, + + // From CompositeResourceSpecProps() + "compositionRef": { + Type: "object", + Required: []string{"name"}, + Properties: map[string]extv1.JSONSchemaProps{ + "name": {Type: "string"}, + }, + }, + "compositionSelector": { + Type: "object", + Required: []string{"matchLabels"}, + Properties: map[string]extv1.JSONSchemaProps{ + "matchLabels": { + Type: "object", + AdditionalProperties: &extv1.JSONSchemaPropsOrBool{ + Allows: true, + Schema: &extv1.JSONSchemaProps{Type: "string"}, + }, + }, + }, + }, + "compositionRevisionRef": { + Type: "object", + Required: []string{"name"}, + Properties: map[string]extv1.JSONSchemaProps{ + "name": {Type: "string"}, + }, + }, + "compositionRevisionSelector": { + Type: "object", + Required: []string{"matchLabels"}, + Properties: map[string]extv1.JSONSchemaProps{ + "matchLabels": { + Type: "object", + AdditionalProperties: &extv1.JSONSchemaPropsOrBool{ + Allows: true, + Schema: &extv1.JSONSchemaProps{Type: "string"}, + }, + }, + }, + }, + "compositionUpdatePolicy": { + Type: "string", + Enum: []extv1.JSON{ + 
{Raw: []byte(`"Automatic"`)}, + {Raw: []byte(`"Manual"`)}, + }, + }, + "claimRef": { + Type: "object", + Required: []string{"apiVersion", "kind", "namespace", "name"}, + Properties: map[string]extv1.JSONSchemaProps{ + "apiVersion": {Type: "string"}, + "kind": {Type: "string"}, + "namespace": {Type: "string"}, + "name": {Type: "string"}, + }, + }, + "environmentConfigRefs": { + Type: "array", + Items: &extv1.JSONSchemaPropsOrArray{ + Schema: &extv1.JSONSchemaProps{ + Type: "object", + Properties: map[string]extv1.JSONSchemaProps{ + "apiVersion": {Type: "string"}, + "name": {Type: "string"}, + "kind": {Type: "string"}, + }, + Required: []string{"apiVersion", "kind"}, + }, + }, + }, + "resourceRefs": { + Type: "array", + Items: &extv1.JSONSchemaPropsOrArray{ + Schema: &extv1.JSONSchemaProps{ + Type: "object", + Properties: map[string]extv1.JSONSchemaProps{ + "apiVersion": {Type: "string"}, + "name": {Type: "string"}, + "kind": {Type: "string"}, + }, + Required: []string{"apiVersion", "kind"}, + }, + }, + XListType: ptr.To("atomic"), + }, + "publishConnectionDetailsTo": { + Type: "object", + Required: []string{"name"}, + Properties: map[string]extv1.JSONSchemaProps{ + "name": {Type: "string"}, + "configRef": { + Type: "object", + Default: &extv1.JSON{Raw: []byte(`{"name": "default"}`)}, + Properties: map[string]extv1.JSONSchemaProps{ + "name": { + Type: "string", + }, + }, + }, + "metadata": { + Type: "object", + Properties: map[string]extv1.JSONSchemaProps{ + "labels": { + Type: "object", + AdditionalProperties: &extv1.JSONSchemaPropsOrBool{ + Allows: true, + Schema: &extv1.JSONSchemaProps{Type: "string"}, + }, + }, + "annotations": { + Type: "object", + AdditionalProperties: &extv1.JSONSchemaPropsOrBool{ + Allows: true, + Schema: &extv1.JSONSchemaProps{Type: "string"}, + }, + }, + "type": { + Type: "string", + }, + }, + }, + }, + }, + "writeConnectionSecretToRef": { + Type: "object", + Required: []string{"name", "namespace"}, + Properties: 
map[string]extv1.JSONSchemaProps{ + "name": {Type: "string"}, + "namespace": {Type: "string"}, + }, + }, + }, + XValidations: extv1.ValidationRules{ + { + Message: "Cannot change engine version", + Rule: "self.engineVersion == oldSelf.engineVersion", + }, + }, + OneOf: []extv1.JSONSchemaProps{ + {Required: []string{"someField"}}, + {Required: []string{"someOtherField"}}, + }, + }, + "status": { + Type: "object", + Description: "Status of the resource.", + Properties: map[string]extv1.JSONSchemaProps{ + "phase": {Type: "string"}, + "something": {Type: "string"}, + + // From CompositeResourceStatusProps() + "conditions": { + Description: "Conditions of the resource.", + Type: "array", + XListType: ptr.To("map"), + XListMapKeys: []string{"type"}, + Items: &extv1.JSONSchemaPropsOrArray{ + Schema: &extv1.JSONSchemaProps{ + Type: "object", + Required: []string{"lastTransitionTime", "reason", "status", "type"}, + Properties: map[string]extv1.JSONSchemaProps{ + "lastTransitionTime": {Type: "string", Format: "date-time"}, + "message": {Type: "string"}, + "reason": {Type: "string"}, + "status": {Type: "string"}, + "type": {Type: "string"}, + }, + }, + }, + }, + "claimConditionTypes": { + Type: "array", + XListType: ptr.To("set"), + Items: &extv1.JSONSchemaPropsOrArray{ + Schema: &extv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, + "connectionDetails": { + Type: "object", + Properties: map[string]extv1.JSONSchemaProps{ + "lastPublishedTime": {Type: "string", Format: "date-time"}, + }, + }, + }, + XValidations: extv1.ValidationRules{ + { + Message: "Phase is required once set", + Rule: "!has(oldSelf.phase) || has(self.phase)", + }, + }, + OneOf: []extv1.JSONSchemaProps{ + {Required: []string{"phase"}}, + {Required: []string{"something"}}, + }, + }, + }, + }, + }, + }}, + }, + }, + }, + }, } for name, tc := range cases { @@ -1979,8 +2326,10 @@ func TestForCompositeResourceClaim(t *testing.T) { }, "compositeDeletePolicy": { Type: "string", - Enum: []extv1.JSON{{Raw: 
[]byte(`"Background"`)}, - {Raw: []byte(`"Foreground"`)}}, + Enum: []extv1.JSON{ + {Raw: []byte(`"Background"`)}, + {Raw: []byte(`"Foreground"`)}, + }, }, // From CompositeResourceClaimSpecProps() "compositionRef": { @@ -2118,6 +2467,15 @@ func TestForCompositeResourceClaim(t *testing.T) { }, }, }, + "claimConditionTypes": { + Type: "array", + XListType: ptr.To("set"), + Items: &extv1.JSONSchemaPropsOrArray{ + Schema: &extv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, "connectionDetails": { Type: "object", Properties: map[string]extv1.JSONSchemaProps{ @@ -2263,8 +2621,10 @@ func TestForCompositeResourceClaim(t *testing.T) { "compositeDeletePolicy": { Type: "string", Default: &extv1.JSON{Raw: []byte(fmt.Sprintf("\"%s\"", defaultPolicy))}, - Enum: []extv1.JSON{{Raw: []byte(`"Background"`)}, - {Raw: []byte(`"Foreground"`)}}, + Enum: []extv1.JSON{ + {Raw: []byte(`"Background"`)}, + {Raw: []byte(`"Foreground"`)}, + }, }, // From CompositeResourceClaimSpecProps() "compositionRef": { @@ -2402,6 +2762,15 @@ func TestForCompositeResourceClaim(t *testing.T) { }, }, }, + "claimConditionTypes": { + Type: "array", + XListType: ptr.To("set"), + Items: &extv1.JSONSchemaPropsOrArray{ + Schema: &extv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, "connectionDetails": { Type: "object", Properties: map[string]extv1.JSONSchemaProps{ @@ -2569,8 +2938,10 @@ func TestForCompositeResourceClaimEmptyXrd(t *testing.T) { Properties: map[string]extv1.JSONSchemaProps{ "compositeDeletePolicy": { Type: "string", - Enum: []extv1.JSON{{Raw: []byte(`"Background"`)}, - {Raw: []byte(`"Foreground"`)}}, + Enum: []extv1.JSON{ + {Raw: []byte(`"Background"`)}, + {Raw: []byte(`"Foreground"`)}, + }, }, // From CompositeResourceClaimSpecProps() "compositionRef": { @@ -2700,6 +3071,15 @@ func TestForCompositeResourceClaimEmptyXrd(t *testing.T) { }, }, }, + "claimConditionTypes": { + Type: "array", + XListType: ptr.To("set"), + Items: &extv1.JSONSchemaPropsOrArray{ + Schema: &extv1.JSONSchemaProps{ + 
Type: "string", + }, + }, + }, "connectionDetails": { Type: "object", Properties: map[string]extv1.JSONSchemaProps{ diff --git a/internal/xcrd/fuzz_test.go b/internal/xcrd/fuzz_test.go index 00918b668..40e9e6c54 100644 --- a/internal/xcrd/fuzz_test.go +++ b/internal/xcrd/fuzz_test.go @@ -25,7 +25,7 @@ import ( ) func FuzzForCompositeResourceXcrd(f *testing.F) { - f.Fuzz(func(t *testing.T, data []byte) { + f.Fuzz(func(_ *testing.T, data []byte) { ff := fuzz.NewConsumer(data) xrd := &v1.CompositeResourceDefinition{} err := ff.GenerateStruct(xrd) @@ -37,7 +37,7 @@ func FuzzForCompositeResourceXcrd(f *testing.F) { } func FuzzForCompositeResourceClaim(f *testing.F) { - f.Fuzz(func(t *testing.T, data []byte) { + f.Fuzz(func(_ *testing.T, data []byte) { ff := fuzz.NewConsumer(data) xrd := &v1.CompositeResourceDefinition{} err := ff.GenerateStruct(xrd) diff --git a/internal/xcrd/schemas.go b/internal/xcrd/schemas.go index 01fee98f3..1d21b5cfd 100644 --- a/internal/xcrd/schemas.go +++ b/internal/xcrd/schemas.go @@ -28,12 +28,12 @@ const ( LabelKeyClaimNamespace = "crossplane.io/claim-namespace" ) -// CompositionRevisionRef should be propagated dynamically -var CompositionRevisionRef = "compositionRevisionRef" +// CompositionRevisionRef should be propagated dynamically. +const CompositionRevisionRef = "compositionRevisionRef" // PropagateSpecProps is the list of XRC spec properties to propagate // when translating an XRC into an XR. -var PropagateSpecProps = []string{"compositionRef", "compositionSelector", "compositionUpdatePolicy", "compositionRevisionSelector"} +var PropagateSpecProps = []string{"compositionRef", "compositionSelector", "compositionUpdatePolicy", "compositionRevisionSelector"} //nolint:gochecknoglobals // We treat this as a constant. // TODO(negz): Add descriptions to schema fields. 
@@ -356,6 +356,15 @@ func CompositeResourceStatusProps() map[string]extv1.JSONSchemaProps { "lastPublishedTime": {Type: "string", Format: "date-time"}, }, }, + "claimConditionTypes": { + Type: "array", + XListType: ptr.To("set"), + Items: &extv1.JSONSchemaPropsOrArray{ + Schema: &extv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, } } @@ -413,7 +422,7 @@ func CompositeResourceClaimPrinterColumns() []extv1.CustomResourceColumnDefiniti } } -// GetPropFields returns the fields from a map of schema properties +// GetPropFields returns the fields from a map of schema properties. func GetPropFields(props map[string]extv1.JSONSchemaProps) []string { propFields := make([]string, len(props)) i := 0 diff --git a/internal/xfn/function_runner.go b/internal/xfn/function_runner.go index caef9eaaf..b722da7fc 100644 --- a/internal/xfn/function_runner.go +++ b/internal/xfn/function_runner.go @@ -24,18 +24,21 @@ import ( "github.com/pkg/errors" "google.golang.org/grpc" + "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/crossplane/crossplane-runtime/pkg/logging" - "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1beta1" + fnv1 "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1" + fnv1beta1 "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1beta1" pkgv1 "github.com/crossplane/crossplane/apis/pkg/v1" - pkgv1beta1 "github.com/crossplane/crossplane/apis/pkg/v1beta1" ) -// Error strings +// Error strings. 
const ( errListFunctionRevisions = "cannot list FunctionRevisions" errNoActiveRevisions = "cannot find an active FunctionRevision (a FunctionRevision with spec.desiredState: Active)" @@ -127,7 +130,7 @@ func NewPackagedFunctionRunner(c client.Reader, o ...PackagedFunctionRunnerOptio // RunFunction sends the supplied RunFunctionRequest to the named Function. The // function is expected to be an installed Function.pkg.crossplane.io package. -func (r *PackagedFunctionRunner) RunFunction(ctx context.Context, name string, req *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error) { +func (r *PackagedFunctionRunner) RunFunction(ctx context.Context, name string, req *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) { conn, err := r.getClientConn(ctx, name) if err != nil { return nil, errors.Wrapf(err, errFmtGetClientConn, name) @@ -137,7 +140,7 @@ func (r *PackagedFunctionRunner) RunFunction(ctx context.Context, name string, r ctx, cancel := context.WithTimeout(ctx, runFunctionTimeout) defer cancel() - rsp, err := v1beta1.NewFunctionRunnerServiceClient(conn).RunFunction(ctx, req) + rsp, err := NewBetaFallBackFunctionRunnerServiceClient(conn).RunFunction(ctx, req) return rsp, errors.Wrapf(err, errFmtRunFunction, name) } @@ -167,12 +170,12 @@ func (r *PackagedFunctionRunner) RunFunction(ctx context.Context, name string, r func (r *PackagedFunctionRunner) getClientConn(ctx context.Context, name string) (*grpc.ClientConn, error) { log := r.log.WithValues("function", name) - l := &pkgv1beta1.FunctionRevisionList{} + l := &pkgv1.FunctionRevisionList{} if err := r.client.List(ctx, l, client.MatchingLabels{pkgv1.LabelParentPackage: name}); err != nil { return nil, errors.Wrapf(err, errListFunctionRevisions) } - var active *pkgv1beta1.FunctionRevision + var active *pkgv1.FunctionRevision for i := range l.Items { if l.Items[i].GetDesiredState() == pkgv1.PackageRevisionActive { active = &l.Items[i] @@ -187,12 +190,24 @@ func (r *PackagedFunctionRunner) 
getClientConn(ctx context.Context, name string) return nil, errors.Errorf(errFmtEmptyEndpoint, active.GetName()) } + // If we have a connection for the up-to-date endpoint, return it. r.connsMx.RLock() conn, ok := r.conns[name] + if ok && conn.Target() == active.Status.Endpoint { + defer r.connsMx.RUnlock() + return conn, nil + } r.connsMx.RUnlock() + // Either we didn't have a connection, or it wasn't up-to-date. + r.connsMx.Lock() + defer r.connsMx.Unlock() + + // Another Goroutine might have updated the connections between when we + // released the read lock and took the write lock, so check again. + conn, ok = r.conns[name] if ok { - // We have a connection for the up-to-date endpoint. Return it. + // We now have a connection for the up-to-date endpoint. if conn.Target() == active.Status.Endpoint { return conn, nil } @@ -202,6 +217,7 @@ func (r *PackagedFunctionRunner) getClientConn(ctx context.Context, name string) // already closed or in the process of closing. log.Debug("Closing gRPC client connection with stale target", "old-target", conn.Target(), "new-target", active.Status.Endpoint) _ = conn.Close() + delete(r.conns, name) } // This context is only used for setting up the connection. @@ -221,9 +237,7 @@ func (r *PackagedFunctionRunner) getClientConn(ctx context.Context, name string) return nil, errors.Wrapf(err, errFmtDialFunction, active.Status.Endpoint, active.GetName()) } - r.connsMx.Lock() r.conns[name] = conn - r.connsMx.Unlock() log.Debug("Created new gRPC client connection", "target", active.Status.Endpoint) return conn, nil @@ -258,19 +272,18 @@ func (r *PackagedFunctionRunner) GarbageCollectConnectionsNow(ctx context.Contex // path where no connections need garbage collecting we shouldn't // take it at all. + // No need to take a write lock or list Functions if there's no work to do. 
r.connsMx.RLock() - connections := make([]string, 0, len(r.conns)) - for name := range r.conns { - connections = append(connections, name) + if len(r.conns) == 0 { + defer r.connsMx.RUnlock() + return 0, nil } r.connsMx.RUnlock() - // No need to list Functions if there's no work to do. - if len(connections) == 0 { - return 0, nil - } + r.connsMx.Lock() + defer r.connsMx.Unlock() - l := &pkgv1beta1.FunctionList{} + l := &pkgv1.FunctionList{} if err := r.client.List(ctx, l); err != nil { return 0, errors.Wrap(err, errListFunctions) } @@ -280,28 +293,85 @@ func (r *PackagedFunctionRunner) GarbageCollectConnectionsNow(ctx context.Contex functionExists[f.GetName()] = true } - // Build a list of connections to garbage collect. - gc := make([]string, 0) - for _, name := range connections { - if !functionExists[name] { - gc = append(gc, name) + // Garbage collect connections. + closed := 0 + for name := range r.conns { + if functionExists[name] { + continue } - } - - // No need to take a write lock if there's no work to do. - if len(gc) == 0 { - return 0, nil - } - r.log.Debug("Closing gRPC client connections for Functions that are no longer installed", "functions", gc) - r.connsMx.Lock() - for _, name := range gc { // Close only returns an error is if the connection is already // closed or in the process of closing. _ = r.conns[name].Close() delete(r.conns, name) + closed++ + r.log.Debug("Closed gRPC client connection to Function that is no longer installed", "function", name) + } + + return closed, nil +} + +// A BetaFallBackFunctionRunnerServiceClient tries to send a v1 RPC. If the +// server reports that v1 is unimplemented, it falls back to sending a v1beta1 +// RPC. It translates the v1 RunFunctionRequest to v1beta1 by round-tripping it +// through protobuf encoding. This works because the two messages are guaranteed +// to be identical - the v1beta1 proto is replicated from the v1 proto. 
+type BetaFallBackFunctionRunnerServiceClient struct { + cc *grpc.ClientConn +} + +// NewBetaFallBackFunctionRunnerServiceClient returns a client that falls back +// to v1beta1 when v1 is unimplemented. +func NewBetaFallBackFunctionRunnerServiceClient(cc *grpc.ClientConn) *BetaFallBackFunctionRunnerServiceClient { + return &BetaFallBackFunctionRunnerServiceClient{cc: cc} +} + +// RunFunction tries to send a v1 RunFunctionRequest. It falls back to v1beta1 +// if the v1 service is unimplemented. +func (c *BetaFallBackFunctionRunnerServiceClient) RunFunction(ctx context.Context, req *fnv1.RunFunctionRequest, opts ...grpc.CallOption) (*fnv1.RunFunctionResponse, error) { + rsp, err := fnv1.NewFunctionRunnerServiceClient(c.cc).RunFunction(ctx, req, opts...) + + // If the v1 RPC worked, just return the response. + if err == nil { + return rsp, nil + } + + // If we hit an error other than Unimplemented, return it. + if status.Code(err) != codes.Unimplemented { + return nil, err + } + + // The v1 RPC is unimplemented. Try the v1beta1 equivalent. The messages + // should be identical in Go and on the wire. + breq, err := toBeta(req) + if err != nil { + return nil, err } - r.connsMx.Unlock() + brsp, err := fnv1beta1.NewFunctionRunnerServiceClient(c.cc).RunFunction(ctx, breq, opts...) 
+ if err != nil { + return nil, err + } + + rsp, err = fromBeta(brsp) + return rsp, err +} + +func toBeta(req *fnv1.RunFunctionRequest) (*fnv1beta1.RunFunctionRequest, error) { + out := &fnv1beta1.RunFunctionRequest{} + b, err := proto.Marshal(req) + if err != nil { + return nil, errors.Wrapf(err, "cannot marshal %T to protobuf bytes", req) + } + err = proto.Unmarshal(b, out) + return out, errors.Wrapf(err, "cannot unmarshal %T protobuf bytes into %T", req, out) +} - return len(gc), nil +func fromBeta(rsp *fnv1beta1.RunFunctionResponse) (*fnv1.RunFunctionResponse, error) { + out := &fnv1.RunFunctionResponse{} + b, err := proto.Marshal(rsp) + if err != nil { + return nil, errors.Wrapf(err, "cannot marshal %T to protobuf bytes", rsp) + } + err = proto.Unmarshal(b, out) + return out, errors.Wrapf(err, "cannot unmarshal %T protobuf bytes into %T", rsp, out) } diff --git a/internal/xfn/function_runner_metrics.go b/internal/xfn/function_runner_metrics.go index 7e0dfa762..8b6bf1adf 100644 --- a/internal/xfn/function_runner_metrics.go +++ b/internal/xfn/function_runner_metrics.go @@ -23,7 +23,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/status" - "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1beta1" + fnv1 "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1" ) // Metrics are requests, errors, and duration (RED) metrics for composition @@ -41,20 +41,20 @@ func NewMetrics() *Metrics { Subsystem: "composition", Name: "run_function_request_total", Help: "Total number of RunFunctionRequests sent.", - }, []string{"function_name", "function_package", "grpc_target"}), + }, []string{"function_name", "function_package", "grpc_target", "grpc_method"}), responses: prometheus.NewCounterVec(prometheus.CounterOpts{ Subsystem: "composition", Name: "run_function_response_total", Help: "Total number of RunFunctionResponses received.", - }, []string{"function_name", "function_package", "grpc_target", "grpc_code", "result_severity"}), + }, 
[]string{"function_name", "function_package", "grpc_target", "grpc_method", "grpc_code", "result_severity"}), duration: prometheus.NewHistogramVec(prometheus.HistogramOpts{ Subsystem: "composition", Name: "run_function_seconds", Help: "Histogram of RunFunctionResponse latency (seconds).", Buckets: prometheus.DefBuckets, - }, []string{"function_name", "function_package", "grpc_target", "grpc_code", "result_severity"}), + }, []string{"function_name", "function_package", "grpc_target", "grpc_method", "grpc_code", "result_severity"}), } } @@ -80,7 +80,7 @@ func (m *Metrics) Collect(ch chan<- prometheus.Metric) { // function. The supplied package (pkg) should be the package's OCI reference. func (m *Metrics) CreateInterceptor(name, pkg string) grpc.UnaryClientInterceptor { return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - l := prometheus.Labels{"function_name": name, "function_package": pkg, "grpc_target": cc.Target()} + l := prometheus.Labels{"function_name": name, "function_package": pkg, "grpc_target": cc.Target(), "grpc_method": method} m.requests.With(l).Inc() @@ -97,16 +97,16 @@ func (m *Metrics) CreateInterceptor(name, pkg string) grpc.UnaryClientIntercepto // no fatal results, has severity "Warning". A response with fatal // results has severity "Fatal". l["result_severity"] = "Normal" - if rsp, ok := reply.(*v1beta1.RunFunctionResponse); ok { + if rsp, ok := reply.(*fnv1.RunFunctionResponse); ok { for _, r := range rsp.GetResults() { // Keep iterating if we see a warning result - we might still // see a fatal result. - if r.GetSeverity() == v1beta1.Severity_SEVERITY_WARNING { + if r.GetSeverity() == fnv1.Severity_SEVERITY_WARNING { l["result_severity"] = "Warning" } // Break if we see a fatal result, to ensure we don't downgrade // the severity to warning. 
- if r.GetSeverity() == v1beta1.Severity_SEVERITY_FATAL { + if r.GetSeverity() == fnv1.Severity_SEVERITY_FATAL { l["result_severity"] = "Fatal" break } diff --git a/internal/xfn/function_runner_test.go b/internal/xfn/function_runner_test.go index 6b960418e..a4473bcb7 100644 --- a/internal/xfn/function_runner_test.go +++ b/internal/xfn/function_runner_test.go @@ -31,11 +31,13 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/test" - "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1beta1" + fnv1 "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1" + fnv1beta1 "github.com/crossplane/crossplane/apis/apiextensions/fn/proto/v1beta1" pkgv1 "github.com/crossplane/crossplane/apis/pkg/v1" - pkgv1beta1 "github.com/crossplane/crossplane/apis/pkg/v1beta1" ) +var _ fnv1.FunctionRunnerServiceClient = &BetaFallBackFunctionRunnerServiceClient{} + func TestRunFunction(t *testing.T) { errBoom := errors.New("boom") @@ -49,10 +51,10 @@ func TestRunFunction(t *testing.T) { type args struct { ctx context.Context name string - req *v1beta1.RunFunctionRequest + req *fnv1.RunFunctionRequest } type want struct { - rsp *v1beta1.RunFunctionResponse + rsp *fnv1.RunFunctionResponse err error } cases := map[string]struct { @@ -81,9 +83,9 @@ func TestRunFunction(t *testing.T) { params: params{ c: &test.MockClient{ MockList: test.NewMockListFn(nil, func(obj client.ObjectList) error { - obj.(*pkgv1beta1.FunctionRevisionList).Items = []pkgv1beta1.FunctionRevision{ + obj.(*pkgv1.FunctionRevisionList).Items = []pkgv1.FunctionRevision{ { - Spec: pkgv1beta1.FunctionRevisionSpec{ + Spec: pkgv1.FunctionRevisionSpec{ PackageRevisionSpec: pkgv1.PackageRevisionSpec{ DesiredState: pkgv1.PackageRevisionInactive, // This revision is not active. 
}, @@ -107,17 +109,17 @@ func TestRunFunction(t *testing.T) { params: params{ c: &test.MockClient{ MockList: test.NewMockListFn(nil, func(obj client.ObjectList) error { - obj.(*pkgv1beta1.FunctionRevisionList).Items = []pkgv1beta1.FunctionRevision{ + obj.(*pkgv1.FunctionRevisionList).Items = []pkgv1.FunctionRevision{ { ObjectMeta: metav1.ObjectMeta{ Name: "cool-fn-revision-a", }, - Spec: pkgv1beta1.FunctionRevisionSpec{ + Spec: pkgv1.FunctionRevisionSpec{ PackageRevisionSpec: pkgv1.PackageRevisionSpec{ DesiredState: pkgv1.PackageRevisionActive, }, }, - Status: pkgv1beta1.FunctionRevisionStatus{ + Status: pkgv1.FunctionRevisionStatus{ Endpoint: "", // An empty endpoint. }, }, @@ -140,28 +142,75 @@ func TestRunFunction(t *testing.T) { c: &test.MockClient{ MockList: test.NewMockListFn(nil, func(obj client.ObjectList) error { // Start a gRPC server. - lis := NewGRPCServer(t, &MockFunctionServer{rsp: &v1beta1.RunFunctionResponse{ - Meta: &v1beta1.ResponseMeta{Tag: "hi!"}, + lis := NewGRPCServer(t, &MockFunctionServer{rsp: &fnv1.RunFunctionResponse{ + Meta: &fnv1.ResponseMeta{Tag: "hi!"}, + }}) + listeners = append(listeners, lis) + + l, ok := obj.(*pkgv1.FunctionRevisionList) + if !ok { + // If we're called to list Functions we want to + // return none, to make sure we GC everything. 
+ return nil + } + l.Items = []pkgv1.FunctionRevision{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "cool-fn-revision-a", + }, + Spec: pkgv1.FunctionRevisionSpec{ + PackageRevisionSpec: pkgv1.PackageRevisionSpec{ + DesiredState: pkgv1.PackageRevisionActive, + }, + }, + Status: pkgv1.FunctionRevisionStatus{ + Endpoint: strings.Replace(lis.Addr().String(), "127.0.0.1", "dns:///localhost", 1), + }, + }, + } + return nil + }), + }, + }, + args: args{ + ctx: context.Background(), + name: "cool-fn", + req: &fnv1.RunFunctionRequest{}, + }, + want: want{ + rsp: &fnv1.RunFunctionResponse{ + Meta: &fnv1.ResponseMeta{Tag: "hi!"}, + }, + }, + }, + "SuccessfulFallbackToBeta": { + reason: "We should create a new client connection and successfully make a v1beta1 request if the server doesn't yet implement v1", + params: params{ + c: &test.MockClient{ + MockList: test.NewMockListFn(nil, func(obj client.ObjectList) error { + // Start a gRPC server. + lis := NewBetaGRPCServer(t, &MockBetaFunctionServer{rsp: &fnv1beta1.RunFunctionResponse{ + Meta: &fnv1beta1.ResponseMeta{Tag: "hi!"}, }}) listeners = append(listeners, lis) - l, ok := obj.(*pkgv1beta1.FunctionRevisionList) + l, ok := obj.(*pkgv1.FunctionRevisionList) if !ok { // If we're called to list Functions we want to // return none, to make sure we GC everything. 
return nil } - l.Items = []pkgv1beta1.FunctionRevision{ + l.Items = []pkgv1.FunctionRevision{ { ObjectMeta: metav1.ObjectMeta{ Name: "cool-fn-revision-a", }, - Spec: pkgv1beta1.FunctionRevisionSpec{ + Spec: pkgv1.FunctionRevisionSpec{ PackageRevisionSpec: pkgv1.PackageRevisionSpec{ DesiredState: pkgv1.PackageRevisionActive, }, }, - Status: pkgv1beta1.FunctionRevisionStatus{ + Status: pkgv1.FunctionRevisionStatus{ Endpoint: strings.Replace(lis.Addr().String(), "127.0.0.1", "dns:///localhost", 1), }, }, @@ -173,11 +222,11 @@ func TestRunFunction(t *testing.T) { args: args{ ctx: context.Background(), name: "cool-fn", - req: &v1beta1.RunFunctionRequest{}, + req: &fnv1.RunFunctionRequest{}, }, want: want{ - rsp: &v1beta1.RunFunctionResponse{ - Meta: &v1beta1.ResponseMeta{Tag: "hi!"}, + rsp: &fnv1.RunFunctionResponse{ + Meta: &fnv1.ResponseMeta{Tag: "hi!"}, }, }, }, @@ -211,12 +260,14 @@ func TestRunFunction(t *testing.T) { } func TestGetClientConn(t *testing.T) { + t.Helper() + // TestRunFunction exercises most of the getClientConn code. Here we just // test some cases that don't fit well in our usual table-driven format. // Start a gRPC server. - lis := NewGRPCServer(t, &MockFunctionServer{rsp: &v1beta1.RunFunctionResponse{ - Meta: &v1beta1.ResponseMeta{Tag: "hi!"}, + lis := NewGRPCServer(t, &MockFunctionServer{rsp: &fnv1.RunFunctionResponse{ + Meta: &fnv1.ResponseMeta{Tag: "hi!"}, }}) defer lis.Close() @@ -254,8 +305,8 @@ func TestGetClientConn(t *testing.T) { }) // Start another gRPC server. - lis2 := NewGRPCServer(t, &MockFunctionServer{rsp: &v1beta1.RunFunctionResponse{ - Meta: &v1beta1.ResponseMeta{Tag: "hi!"}, + lis2 := NewGRPCServer(t, &MockFunctionServer{rsp: &fnv1.RunFunctionResponse{ + Meta: &fnv1.ResponseMeta{Tag: "hi!"}, }}) defer lis2.Close() @@ -287,8 +338,8 @@ func TestGarbageCollectConnectionsNow(t *testing.T) { // table-driven format. // Start a gRPC server. 
- lis := NewGRPCServer(t, &MockFunctionServer{rsp: &v1beta1.RunFunctionResponse{ - Meta: &v1beta1.ResponseMeta{Tag: "hi!"}, + lis := NewGRPCServer(t, &MockFunctionServer{rsp: &fnv1.RunFunctionResponse{ + Meta: &fnv1.ResponseMeta{Tag: "hi!"}, }}) defer lis.Close() @@ -310,7 +361,7 @@ func TestGarbageCollectConnectionsNow(t *testing.T) { t.Run("FunctionStillExistsDoNotGarbageCollect", func(t *testing.T) { c.MockList = test.NewMockListFn(nil, func(obj client.ObjectList) error { - obj.(*pkgv1beta1.FunctionList).Items = []pkgv1beta1.Function{ + obj.(*pkgv1.FunctionList).Items = []pkgv1.Function{ { // This Function exists! ObjectMeta: metav1.ObjectMeta{Name: "cool-fn"}, @@ -346,23 +397,23 @@ func TestGarbageCollectConnectionsNow(t *testing.T) { func NewListFn(target string) test.MockListFn { return test.NewMockListFn(nil, func(obj client.ObjectList) error { - l, ok := obj.(*pkgv1beta1.FunctionRevisionList) + l, ok := obj.(*pkgv1.FunctionRevisionList) if !ok { // If we're called to list Functions we want to // return none, to make sure we GC everything. return nil } - l.Items = []pkgv1beta1.FunctionRevision{ + l.Items = []pkgv1.FunctionRevision{ { ObjectMeta: metav1.ObjectMeta{ Name: "cool-fn-revision-a", }, - Spec: pkgv1beta1.FunctionRevisionSpec{ + Spec: pkgv1.FunctionRevisionSpec{ PackageRevisionSpec: pkgv1.PackageRevisionSpec{ DesiredState: pkgv1.PackageRevisionActive, }, }, - Status: pkgv1beta1.FunctionRevisionStatus{ + Status: pkgv1.FunctionRevisionStatus{ Endpoint: target, }, }, @@ -371,7 +422,30 @@ func NewListFn(target string) test.MockListFn { }) } -func NewGRPCServer(t *testing.T, ss v1beta1.FunctionRunnerServiceServer) net.Listener { +func NewGRPCServer(t *testing.T, ss fnv1.FunctionRunnerServiceServer) net.Listener { + t.Helper() + + // Listen on a random port. 
+ lis, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + t.Logf("Listening for gRPC connections on %q", lis.Addr().String()) + + // TODO(negz): Is it worth using a WaitGroup for these? + go func() { + s := grpc.NewServer() + fnv1.RegisterFunctionRunnerServiceServer(s, ss) + _ = s.Serve(lis) + }() + + // The caller must close this listener to terminate the server. + return lis +} + +func NewBetaGRPCServer(t *testing.T, ss fnv1beta1.FunctionRunnerServiceServer) net.Listener { + t.Helper() + // Listen on a random port. lis, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { @@ -382,7 +456,7 @@ func NewGRPCServer(t *testing.T, ss v1beta1.FunctionRunnerServiceServer) net.Lis // TODO(negz): Is it worth using a WaitGroup for these? go func() { s := grpc.NewServer() - v1beta1.RegisterFunctionRunnerServiceServer(s, ss) + fnv1beta1.RegisterFunctionRunnerServiceServer(s, ss) _ = s.Serve(lis) }() @@ -391,12 +465,23 @@ func NewGRPCServer(t *testing.T, ss v1beta1.FunctionRunnerServiceServer) net.Lis } type MockFunctionServer struct { - v1beta1.UnimplementedFunctionRunnerServiceServer + fnv1.UnimplementedFunctionRunnerServiceServer + + rsp *fnv1.RunFunctionResponse + err error +} + +func (s *MockFunctionServer) RunFunction(context.Context, *fnv1.RunFunctionRequest) (*fnv1.RunFunctionResponse, error) { + return s.rsp, s.err +} + +type MockBetaFunctionServer struct { + fnv1beta1.UnimplementedFunctionRunnerServiceServer - rsp *v1beta1.RunFunctionResponse + rsp *fnv1beta1.RunFunctionResponse err error } -func (s *MockFunctionServer) RunFunction(context.Context, *v1beta1.RunFunctionRequest) (*v1beta1.RunFunctionResponse, error) { +func (s *MockBetaFunctionServer) RunFunction(context.Context, *fnv1beta1.RunFunctionRequest) (*fnv1beta1.RunFunctionResponse, error) { return s.rsp, s.err } diff --git a/internal/xpkg/build.go b/internal/xpkg/build.go index 0d2fed166..32ac7e962 100644 --- a/internal/xpkg/build.go +++ b/internal/xpkg/build.go @@ -124,7 +124,7 
@@ func WithBase(img v1.Image) BuildOpt { } // Build compiles a Crossplane package from an on-disk package. -func (b *Builder) Build(ctx context.Context, opts ...BuildOpt) (v1.Image, runtime.Object, error) { //nolint:gocyclo // TODO(lsviben) consider refactoring +func (b *Builder) Build(ctx context.Context, opts ...BuildOpt) (v1.Image, runtime.Object, error) { bOpts := &buildOpts{ base: empty.Image, } @@ -252,7 +252,7 @@ func encode(pkg parser.Lintable) (*bytes.Buffer, error) { // SkipContains supplies a FilterFn that skips paths that contain the give pattern. func SkipContains(pattern string) parser.FilterFn { - return func(path string, info os.FileInfo) (bool, error) { + return func(path string, _ os.FileInfo) (bool, error) { return strings.Contains(path, pattern), nil } } diff --git a/internal/xpkg/fake/mocks.go b/internal/xpkg/fake/mocks.go index cc041d132..78b072f78 100644 --- a/internal/xpkg/fake/mocks.go +++ b/internal/xpkg/fake/mocks.go @@ -49,7 +49,7 @@ func NewMockCacheGetFn(rc io.ReadCloser, err error) func() (io.ReadCloser, error // NewMockCacheStoreFn creates a new MockStore function for MockCache. func NewMockCacheStoreFn(err error) func(s string, rc io.ReadCloser) error { - return func(s string, rc io.ReadCloser) error { return err } + return func(_ string, _ io.ReadCloser) error { return err } } // NewMockCacheDeleteFn creates a new MockDelete function for MockCache. diff --git a/internal/xpkg/fetch.go b/internal/xpkg/fetch.go index 5b10f3321..9ae40b97b 100644 --- a/internal/xpkg/fetch.go +++ b/internal/xpkg/fetch.go @@ -34,7 +34,7 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/errors" ) -func init() { +func init() { //nolint:gochecknoinits // See comment below. 
// NOTE(hasheddan): we set the logrus package-level logger to discard output // due to the fact that the AWS ECR credential helper uses it to log errors // when parsing registry server URL, which happens any time a package is @@ -59,7 +59,7 @@ type K8sFetcher struct { userAgent string } -// FetcherOpt can be used to add optional parameters to NewK8sFetcher +// FetcherOpt can be used to add optional parameters to NewK8sFetcher. type FetcherOpt func(k *K8sFetcher) error // WithCustomCA is a FetcherOpt that can be used to add a custom CA bundle to a K8sFetcher. @@ -108,9 +108,13 @@ func WithServiceAccount(sa string) FetcherOpt { // NewK8sFetcher creates a new K8sFetcher. func NewK8sFetcher(client kubernetes.Interface, opts ...FetcherOpt) (*K8sFetcher, error) { + dt, ok := remote.DefaultTransport.(*http.Transport) + if !ok { + return nil, errors.Errorf("default transport was not a %T", &http.Transport{}) + } k := &K8sFetcher{ client: client, - transport: remote.DefaultTransport.(*http.Transport).Clone(), + transport: dt.Clone(), } for _, o := range opts { diff --git a/internal/xpkg/fuzz_test.go b/internal/xpkg/fuzz_test.go index 296e55df3..359f08887 100644 --- a/internal/xpkg/fuzz_test.go +++ b/internal/xpkg/fuzz_test.go @@ -38,7 +38,7 @@ func FuzzFindXpkgInDir(f *testing.F) { fs.Remove(createdFile) } }() - for i := 0; i < noOfFiles%500; i++ { + for range noOfFiles % 500 { fname, err := ff.GetString() if err != nil { t.Skip() @@ -48,7 +48,7 @@ func FuzzFindXpkgInDir(f *testing.F) { t.Skip() } - if err = afero.WriteFile(fs, fname, fcontents, 0777); err != nil { + if err = afero.WriteFile(fs, fname, fcontents, 0o777); err != nil { t.Skip() } } @@ -56,5 +56,4 @@ func FuzzFindXpkgInDir(f *testing.F) { _, _ = FindXpkgInDir(fs, "/") _, _ = ParseNameFromMeta(fs, "/") }) - } diff --git a/internal/xpkg/lint.go b/internal/xpkg/lint.go index c8f78a1c8..40860334b 100644 --- a/internal/xpkg/lint.go +++ b/internal/xpkg/lint.go @@ -108,7 +108,7 @@ func IsFunction(o runtime.Object) 
error { // compatible with the package constraints. func PackageCrossplaneCompatible(v version.Operations) parser.ObjectLinterFn { return func(o runtime.Object) error { - p, ok := TryConvertToPkg(o, &pkgmetav1.Provider{}, &pkgmetav1.Configuration{}, &pkgmetav1beta1.Function{}) + p, ok := TryConvertToPkg(o, &pkgmetav1.Provider{}, &pkgmetav1.Configuration{}, &pkgmetav1.Function{}) if !ok { return errors.New(errNotMeta) } @@ -129,7 +129,7 @@ func PackageCrossplaneCompatible(v version.Operations) parser.ObjectLinterFn { // PackageValidSemver checks that the package uses valid semver ranges. func PackageValidSemver(o runtime.Object) error { - p, ok := TryConvertToPkg(o, &pkgmetav1.Provider{}, &pkgmetav1.Configuration{}, &pkgmetav1beta1.Function{}) + p, ok := TryConvertToPkg(o, &pkgmetav1.Provider{}, &pkgmetav1.Configuration{}, &pkgmetav1.Function{}) if !ok { return errors.New(errNotMeta) } diff --git a/internal/xpkg/name.go b/internal/xpkg/name.go index 6d69b13b5..1fd73fedd 100644 --- a/internal/xpkg/name.go +++ b/internal/xpkg/name.go @@ -85,7 +85,7 @@ func FriendlyID(name, hash string) string { } // ToDNSLabel converts the string to a valid DNS label. -func ToDNSLabel(s string) string { //nolint:gocyclo // TODO(negz): Document the conditions in this function. +func ToDNSLabel(s string) string { var cut strings.Builder for i := range s { b := s[i] diff --git a/internal/xpkg/reader.go b/internal/xpkg/reader.go index f9d8c01b7..afb966002 100644 --- a/internal/xpkg/reader.go +++ b/internal/xpkg/reader.go @@ -89,7 +89,7 @@ func (t *teeReadCloser) Close() error { var _ io.ReadCloser = &joinedReadCloser{} -// joinedReadCloster joins a reader and a closer. It is typically used in the +// joinedReadCloser joins a reader and a closer. It is typically used in the // context of a ReadCloser being wrapped by a Reader. 
type joinedReadCloser struct { r io.Reader diff --git a/internal/xpkg/scheme.go b/internal/xpkg/scheme.go index c046b45d2..cb7efc73f 100644 --- a/internal/xpkg/scheme.go +++ b/internal/xpkg/scheme.go @@ -77,7 +77,6 @@ func TryConvert(obj runtime.Object, candidates ...conversion.Hub) (runtime.Objec } for _, c := range candidates { - c := c if err := cvt.ConvertTo(c); err == nil { return c, true } diff --git a/internal/xpkg/upbound/config/config.go b/internal/xpkg/upbound/config/config.go index fa194acfd..6bac89c8d 100644 --- a/internal/xpkg/upbound/config/config.go +++ b/internal/xpkg/upbound/config/config.go @@ -88,7 +88,7 @@ const ( TokenProfileType ProfileType = "token" ) -// A Profile is a set of credentials +// A Profile is a set of credentials. type Profile struct { // ID is either a username, email, or token. ID string `json:"id"` @@ -139,14 +139,14 @@ func checkProfile(p Profile) error { } // AddOrUpdateUpboundProfile adds or updates an Upbound profile to the Config. -func (c *Config) AddOrUpdateUpboundProfile(name string, new Profile) error { - if err := checkProfile(new); err != nil { +func (c *Config) AddOrUpdateUpboundProfile(name string, p Profile) error { + if err := checkProfile(p); err != nil { return err } if c.Upbound.Profiles == nil { c.Upbound.Profiles = map[string]Profile{} } - c.Upbound.Profiles[name] = new + c.Upbound.Profiles[name] = p return nil } diff --git a/internal/xpkg/upbound/config/source.go b/internal/xpkg/upbound/config/source.go index b1d0fa28f..1dee522ac 100644 --- a/internal/xpkg/upbound/config/source.go +++ b/internal/xpkg/upbound/config/source.go @@ -29,7 +29,7 @@ import ( type Source interface { Initialize() error GetConfig() (*Config, error) - UpdateConfig(*Config) error + UpdateConfig(cfg *Config) error } // NewFSSource constructs a new FSSource. 
Path must be supplied via modifier or @@ -88,10 +88,10 @@ func (src *FSSource) Initialize() error { if !os.IsNotExist(err) { return err } - if err := src.fs.MkdirAll(filepath.Dir(src.path), 0755); err != nil { + if err := src.fs.MkdirAll(filepath.Dir(src.path), 0o755); err != nil { return err } - f, err := src.fs.OpenFile(src.path, os.O_CREATE, 0600) + f, err := src.fs.OpenFile(src.path, os.O_CREATE, 0o600) if err != nil { return err } @@ -123,7 +123,7 @@ func (src *FSSource) GetConfig() (*Config, error) { // UpdateConfig updates the Config in the filesystem. func (src *FSSource) UpdateConfig(c *Config) error { - f, err := src.fs.OpenFile(src.path, os.O_RDWR|os.O_TRUNC, 0600) + f, err := src.fs.OpenFile(src.path, os.O_RDWR|os.O_TRUNC, 0o600) if err != nil { return err } diff --git a/internal/xpkg/upbound/config/source_test.go b/internal/xpkg/upbound/config/source_test.go index ce66eca82..5a8e81d10 100644 --- a/internal/xpkg/upbound/config/source_test.go +++ b/internal/xpkg/upbound/config/source_test.go @@ -27,8 +27,10 @@ import ( "github.com/crossplane/crossplane-runtime/pkg/test" ) -var _ Source = &FSSource{} -var _ Source = &MockSource{} +var ( + _ Source = &FSSource{} + _ Source = &MockSource{} +) // TODO(hasheddan): a mock afero.Fs could increase test coverage here with // simulated failed file opens and writes. 
@@ -107,7 +109,7 @@ func TestGetConfig(t *testing.T) { func(f *FSSource) { f.path = "/.up/config.json" fs := afero.NewMemMapFs() - file, _ := fs.OpenFile("/.up/config.json", os.O_CREATE, 0600) + file, _ := fs.OpenFile("/.up/config.json", os.O_CREATE, 0o600) defer file.Close() b, _ := json.Marshal(testConf) //nolint:errchkjson // marshalling should not fail _, _ = file.Write(b) diff --git a/internal/xpkg/upbound/context.go b/internal/xpkg/upbound/context.go index 81d589c88..b9fdb1b80 100644 --- a/internal/xpkg/upbound/context.go +++ b/internal/xpkg/upbound/context.go @@ -56,14 +56,14 @@ const ( // Flags are common flags used by commands that interact with Upbound. type Flags struct { // Keep sorted alphabetically. - Account string `short:"a" env:"UP_ACCOUNT" help:"Account used to execute command." json:"account,omitempty"` - Domain *url.URL `env:"UP_DOMAIN" default:"https://upbound.io" help:"Root Upbound domain." json:"domain,omitempty"` + Account string `env:"UP_ACCOUNT" help:"Account used to execute command." json:"account,omitempty" short:"a"` + Domain *url.URL `default:"https://upbound.io" env:"UP_DOMAIN" help:"Root Upbound domain." json:"domain,omitempty"` InsecureSkipTLSVerify bool `env:"UP_INSECURE_SKIP_TLS_VERIFY" help:"[INSECURE] Skip verifying TLS certificates." json:"insecureSkipTLSVerify,omitempty"` - Profile string `env:"UP_PROFILE" help:"Profile used to execute command." predictor:"profiles" json:"profile,omitempty"` + Profile string `env:"UP_PROFILE" help:"Profile used to execute command." json:"profile,omitempty" predictor:"profiles"` // Hidden flags. - APIEndpoint *url.URL `env:"OVERRIDE_API_ENDPOINT" hidden:"" name:"override-api-endpoint" help:"Overrides the default API endpoint." json:"apiEndpoint,omitempty"` - RegistryEndpoint *url.URL `env:"OVERRIDE_REGISTRY_ENDPOINT" hidden:"" name:"override-registry-endpoint" help:"Overrides the default registry endpoint." 
json:"registryEndpoint,omitempty"` + APIEndpoint *url.URL `env:"OVERRIDE_API_ENDPOINT" help:"Overrides the default API endpoint." hidden:"" json:"apiEndpoint,omitempty" name:"override-api-endpoint"` + RegistryEndpoint *url.URL `env:"OVERRIDE_REGISTRY_ENDPOINT" help:"Overrides the default registry endpoint." hidden:"" json:"registryEndpoint,omitempty" name:"override-registry-endpoint"` } // Context includes common data that Upbound consumers may utilize. @@ -86,7 +86,7 @@ type Context struct { fs afero.Fs } -// Option modifies a Context +// Option modifies a Context. type Option func(*Context) // AllowMissingProfile indicates that Context should still be returned even if a @@ -98,7 +98,7 @@ func AllowMissingProfile() Option { } // NewFromFlags constructs a new context from flags. -func NewFromFlags(f Flags, opts ...Option) (*Context, error) { //nolint:gocyclo // TODO(phisco): imported as is, refactor +func NewFromFlags(f Flags, opts ...Option) (*Context, error) { p, err := config.GetDefaultPath() if err != nil { return nil, err @@ -185,10 +185,11 @@ func (c *Context) BuildSDKConfig() (*up.Config, error) { return nil, err } if c.Profile.Session != "" { - cj.SetCookies(c.APIEndpoint, []*http.Cookie{{ - Name: CookieName, - Value: c.Profile.Session, - }, + cj.SetCookies(c.APIEndpoint, []*http.Cookie{ + { + Name: CookieName, + Value: c.Profile.Session, + }, }) } tr := &http.Transport{ @@ -251,10 +252,10 @@ func (f Flags) MarshalJSON() ([]byte, error) { Domain string `json:"domain,omitempty"` Profile string `json:"profile,omitempty"` Account string `json:"account,omitempty"` - InsecureSkipTLSVerify bool `json:"insecure_skip_tls_verify,omitempty"` - APIEndpoint string `json:"override_api_endpoint,omitempty"` - ProxyEndpoint string `json:"override_proxy_endpoint,omitempty"` - RegistryEndpoint string `json:"override_registry_endpoint,omitempty"` + InsecureSkipTLSVerify bool `json:"insecure_skip_tls_verify,omitempty"` //nolint:tagliatelle // We want snake case in this file. 
+ APIEndpoint string `json:"override_api_endpoint,omitempty"` //nolint:tagliatelle // We want snake case in this file. + ProxyEndpoint string `json:"override_proxy_endpoint,omitempty"` //nolint:tagliatelle // We want snake case in this file. + RegistryEndpoint string `json:"override_registry_endpoint,omitempty"` //nolint:tagliatelle // We want snake case in this file. }{ Domain: nullableURL(f.Domain), Profile: f.Profile, diff --git a/internal/xpkg/upbound/context_test.go b/internal/xpkg/upbound/context_test.go index fd129e83c..d46b39f98 100644 --- a/internal/xpkg/upbound/context_test.go +++ b/internal/xpkg/upbound/context_test.go @@ -19,6 +19,7 @@ package upbound import ( "fmt" "net/url" + "os" "path/filepath" "testing" @@ -81,7 +82,7 @@ func withConfig(config string) Option { return func(ctx *Context) { // establish fs and create config.json fs := afero.NewMemMapFs() - fs.MkdirAll(filepath.Dir("/.up/"), 0755) + fs.MkdirAll(filepath.Dir("/.up/"), 0o755) f, _ := fs.Create("/.up/config.json") f.WriteString(config) @@ -274,6 +275,11 @@ func TestNewFromFlags(t *testing.T) { } for name, tc := range cases { + // Unset common UP env vars used by the test to avoid unexpected behaviours described in #5721 + os.Unsetenv("UP_ACCOUNT") + os.Unsetenv("UP_DOMAIN") + os.Unsetenv("UP_PROFILE") + os.Unsetenv("UP_INSECURE_SKIP_TLS_VERIFY") t.Run(name, func(t *testing.T) { flags := Flags{} parser, _ := kong.New(&flags) diff --git a/internal/xpkg/upbound/resolver.go b/internal/xpkg/upbound/resolver.go index b029d1141..c4179bbb3 100644 --- a/internal/xpkg/upbound/resolver.go +++ b/internal/xpkg/upbound/resolver.go @@ -39,7 +39,7 @@ func JSON(base, overlay io.Reader) (kong.Resolver, error) { return nil, err } - var f kong.ResolverFunc = func(context *kong.Context, parent *kong.Path, flag *kong.Flag) (interface{}, error) { + var f kong.ResolverFunc = func(_ *kong.Context, _ *kong.Path, flag *kong.Flag) (interface{}, error) { name := strings.ReplaceAll(flag.Name, "-", "_") bRaw, bOk :=
resolveValue(name, flag.Envs, baseValues) oRaw, oOk := resolveValue(name, flag.Envs, overlayValues) diff --git a/internal/xpkg/upbound/token.go b/internal/xpkg/upbound/token.go index 2237f8f75..525f70ad0 100644 --- a/internal/xpkg/upbound/token.go +++ b/internal/xpkg/upbound/token.go @@ -29,7 +29,7 @@ const errInvalidTokenFile = "token file is invalid" // TokenFile is the format in which Upbound tokens are stored on disk. type TokenFile struct { - AccessID string `json:"accessId"` + AccessID string `json:"accessId"` //nolint:tagliatelle // Should be accessID, but keeping accessId for backward compatibility. Token string `json:"token"` } diff --git a/pkg/validation/apiextensions/v1/composition/patches.go b/pkg/validation/apiextensions/v1/composition/patches.go index 7cd2c4d25..76a918126 100644 --- a/pkg/validation/apiextensions/v1/composition/patches.go +++ b/pkg/validation/apiextensions/v1/composition/patches.go @@ -90,7 +90,6 @@ func (v *Validator) validateEnvironmentPatchesWithSchemas(ctx context.Context, c compositeResGVK: compositeResGVK, }), field.NewPath("spec").Child("environment", "patches").Index(i)); err != nil { errs = append(errs, err) - } } return errs @@ -169,7 +168,7 @@ type patchValidationCtx struct { resourceGVK schema.GroupVersionKind } -func (v *Validator) validatePatchWithSchemaInternal(ctx patchValidationCtx) *field.Error { //nolint:gocyclo // mainly due to the switch, not much to refactor +func (v *Validator) validatePatchWithSchemaInternal(ctx patchValidationCtx) *field.Error { var validationErr *field.Error var fromType, toType xpschema.KnownJSONType switch ctx.patch.GetType() { @@ -328,7 +327,6 @@ func validateTransformsChainIOTypes(transforms []v1.Transform, fromType xpschema return "", field.InternalError(field.NewPath("transforms"), err) } for i, transform := range transforms { - transform := transform err := IsValidInputForTransform(&transform, inputType) if err != nil && inputType != "" { return "", 
field.Invalid(field.NewPath("transforms").Index(i), transform, err.Error()) @@ -399,7 +397,7 @@ func validateFieldPathSegment(parent *apiextensions.JSONSchemaProps, segment fie return nil, nil } -func validateFieldPathSegmentField(parent *apiextensions.JSONSchemaProps, segment fieldpath.Segment) (*apiextensions.JSONSchemaProps, error) { //nolint:gocyclo // inherently complex +func validateFieldPathSegmentField(parent *apiextensions.JSONSchemaProps, segment fieldpath.Segment) (*apiextensions.JSONSchemaProps, error) { if parent == nil { return nil, nil } @@ -427,7 +425,6 @@ func validateFieldPathSegmentField(parent *apiextensions.JSONSchemaProps, segmen return validateFieldPathSegmentField(parent.AdditionalProperties.Schema, segment) } return nil, errors.Errorf(errFmtFieldInvalid, segment.Field) - } return &prop, nil } @@ -462,8 +459,6 @@ func validateFieldPathSegmentIndex(parent *apiextensions.JSONSchemaProps, segmen } // IsValidInputForTransform validates the supplied Transform type, taking into consideration also the input type. -// -//nolint:gocyclo // This is a long but simple/same-y switch. func IsValidInputForTransform(t *v1.Transform, fromType v1.TransformIOType) error { switch t.Type { case v1.TransformTypeMath: @@ -530,7 +525,7 @@ func GetBaseObject(ct *v1.ComposedTemplate) (client.Object, error) { ct.Base.Object = cd } if ct, ok := ct.Base.Object.(client.Object); ok { - return ct.DeepCopyObject().(client.Object), nil + return ct.DeepCopyObject().(client.Object), nil //nolint:forcetypeassert // Deepcopy will always be the same type. 
} return nil, errors.New("base object is not a client.Object") } diff --git a/pkg/validation/apiextensions/v1/composition/patches_test.go b/pkg/validation/apiextensions/v1/composition/patches_test.go index b72cfa1a3..be0cbbb03 100644 --- a/pkg/validation/apiextensions/v1/composition/patches_test.go +++ b/pkg/validation/apiextensions/v1/composition/patches_test.go @@ -33,6 +33,7 @@ import ( xperrors "github.com/crossplane/crossplane-runtime/pkg/errors" "github.com/crossplane/crossplane-runtime/pkg/fieldpath" + "github.com/crossplane/crossplane-runtime/pkg/resource/unstructured/composed" "github.com/crossplane/crossplane-runtime/pkg/test" v1 "github.com/crossplane/crossplane/apis/apiextensions/v1" @@ -366,7 +367,14 @@ func TestValidateFieldPath(t *testing.T) { Properties: map[string]apiextensions.JSONSchemaProps{ "forProvider": { Properties: map[string]apiextensions.JSONSchemaProps{ - "foo": {Type: "string"}}}}}}}}, + "foo": {Type: "string"}, + }, + }, + }, + }, + }, + }, + }, }, "AcceptMetadataLabelsValue": { reason: "Should validate a valid field path", @@ -391,7 +399,14 @@ func TestValidateFieldPath(t *testing.T) { Properties: map[string]apiextensions.JSONSchemaProps{ "forProvider": { Properties: map[string]apiextensions.JSONSchemaProps{ - "foo": {Type: "string"}}}}}}}}, + "foo": {Type: "string"}, + }, + }, + }, + }, + }, + }, + }, }, "AcceptFieldPathXPreserveUnknownFields": { reason: "Should not return an error for an undefined but accepted field path", @@ -404,9 +419,15 @@ func TestValidateFieldPath(t *testing.T) { Properties: map[string]apiextensions.JSONSchemaProps{ "forProvider": { Properties: map[string]apiextensions.JSONSchemaProps{ - "foo": {Type: "string"}}, + "foo": {Type: "string"}, + }, XPreserveUnknownFields: &[]bool{true}[0], - }}}}}}, + }, + }, + }, + }, + }, + }, }, "AcceptValidArray": { reason: "Should validate arrays properly", @@ -424,7 +445,18 @@ func TestValidateFieldPath(t *testing.T) { Items: &apiextensions.JSONSchemaPropsOrArray{ Schema: 
&apiextensions.JSONSchemaProps{ Properties: map[string]apiextensions.JSONSchemaProps{ - "bar": {Type: "string"}}}}}}}}}}}}, + "bar": {Type: "string"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, }, "AcceptComplexSchema": { reason: "Should validate properly with complex schema", @@ -461,6 +493,14 @@ func TestValidateFieldPath(t *testing.T) { schema: &apiextensions.JSONSchemaProps{Properties: map[string]apiextensions.JSONSchemaProps{"metadata": {Type: "object"}}}, }, }, + "AcceptMetadataGenerateName": { + reason: "Should accept metadata.generateName", + want: want{err: nil, fieldType: "string"}, + args: args{ + fieldPath: "metadata.generateName", + schema: &apiextensions.JSONSchemaProps{Properties: map[string]apiextensions.JSONSchemaProps{"metadata": {Type: "object"}}}, + }, + }, "AcceptXPreserveUnknownFieldsInAdditionalProperties": { reason: "Should properly handle x-preserve-unknown-fields even if defined in a nested schema", want: want{err: nil, fieldType: ""}, @@ -475,7 +515,10 @@ func TestValidateFieldPath(t *testing.T) { XPreserveUnknownFields: &[]bool{true}[0], }, }, - }}}}, + }, + }, + }, + }, }, "AcceptAnnotations": { want: want{err: nil, fieldType: "string"}, @@ -896,7 +939,6 @@ func TestGetSchemaForVersion(t *testing.T) { } }) } - } func TestComposedTemplateGetBaseObject(t *testing.T) { @@ -922,12 +964,14 @@ func TestComposedTemplateGetBaseObject(t *testing.T) { }, }, want: want{ - output: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "v1", - "kind": "Service", - "metadata": map[string]interface{}{ - "name": "foo", + output: &composed.Unstructured{ + Unstructured: unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Service", + "metadata": map[string]interface{}{ + "name": "foo", + }, }, }, }, diff --git a/pkg/validation/apiextensions/v1/composition/readinessChecks_test.go b/pkg/validation/apiextensions/v1/composition/readinessChecks_test.go index 9f76fb2cf..4b555a7f4 
100644 --- a/pkg/validation/apiextensions/v1/composition/readinessChecks_test.go +++ b/pkg/validation/apiextensions/v1/composition/readinessChecks_test.go @@ -98,7 +98,12 @@ func TestValidateReadinessCheck(t *testing.T) { FieldPath: "spec.someOtherField", }, )), - gkToCRD: defaultGKToCRDs(), + gkToCRD: buildGkToCRDs( + defaultManagedCrdBuilder().withOption(func(crd *extv1.CustomResourceDefinition) { + crd.Spec.Versions[0].Schema.OpenAPIV3Schema.Properties["spec"].Properties["someOtherField"] = extv1.JSONSchemaProps{ + Type: "boolean", + } + }).build()), }, want: want{ errs: nil, @@ -114,7 +119,12 @@ func TestValidateReadinessCheck(t *testing.T) { FieldPath: "spec.someOtherField", }, )), - gkToCRD: defaultGKToCRDs(), + gkToCRD: buildGkToCRDs( + defaultManagedCrdBuilder().withOption(func(crd *extv1.CustomResourceDefinition) { + crd.Spec.Versions[0].Schema.OpenAPIV3Schema.Properties["spec"].Properties["someOtherField"] = extv1.JSONSchemaProps{ + Type: "boolean", + } + }).build()), }, want: want{ errs: nil, @@ -237,7 +247,6 @@ func TestValidateReadinessCheck(t *testing.T) { crd.Spec.Versions[1].Schema.OpenAPIV3Schema.Properties["spec"].Properties["someField"] = extv1.JSONSchemaProps{ Type: "integer", } - }).build()), }, want: want{ @@ -291,7 +300,7 @@ func TestValidateReadinessCheck(t *testing.T) { t.Fatalf("NewValidator() error = %v", err) } got := v.validateReadinessChecksWithSchemas(context.TODO(), tt.args.comp) - if diff := cmp.Diff(got, tt.want.errs, sortFieldErrors(), cmpopts.IgnoreFields(field.Error{}, "Detail")); diff != "" { + if diff := cmp.Diff(tt.want.errs, got, sortFieldErrors(), cmpopts.IgnoreFields(field.Error{}, "Detail")); diff != "" { t.Errorf("validateReadinessChecksWithSchemas(...) 
= -want, +got\n%s\n", diff) } }) diff --git a/pkg/validation/apiextensions/v1/composition/schema.go b/pkg/validation/apiextensions/v1/composition/schema.go index 061284f66..848fabf8f 100644 --- a/pkg/validation/apiextensions/v1/composition/schema.go +++ b/pkg/validation/apiextensions/v1/composition/schema.go @@ -6,7 +6,7 @@ import ( "github.com/crossplane/crossplane/pkg/validation/internal/schema" ) -// sets all the defaults in the given schema +// sets all the defaults in the given schema. func defaultMetadataSchema(in *apiextensions.JSONSchemaProps) *apiextensions.JSONSchemaProps { out := in if out == nil { @@ -26,11 +26,13 @@ func defaultMetadataSchema(in *apiextensions.JSONSchemaProps) *apiextensions.JSO return out } + func defaultMetadataOnly(metadata *apiextensions.JSONSchemaProps) *apiextensions.JSONSchemaProps { setDefaultType(metadata) setDefaultProperty(metadata, "name", string(schema.KnownJSONTypeString)) setDefaultProperty(metadata, "namespace", string(schema.KnownJSONTypeString)) setDefaultProperty(metadata, "uid", string(schema.KnownJSONTypeString)) + setDefaultProperty(metadata, "generateName", string(schema.KnownJSONTypeString)) setDefaultLabels(metadata) setDefaultAnnotations(metadata) return metadata diff --git a/pkg/validation/apiextensions/v1/composition/schema_test.go b/pkg/validation/apiextensions/v1/composition/schema_test.go index 5154f859d..07f8c5cc6 100644 --- a/pkg/validation/apiextensions/v1/composition/schema_test.go +++ b/pkg/validation/apiextensions/v1/composition/schema_test.go @@ -16,6 +16,7 @@ func getDefaultMetadataSchema() *apiextensions.JSONSchemaProps { func getDefaultSchema() *apiextensions.JSONSchemaProps { return defaultMetadataSchema(&apiextensions.JSONSchemaProps{}) } + func TestDefaultMetadataSchema(t *testing.T) { type args struct { in *apiextensions.JSONSchemaProps @@ -61,14 +62,18 @@ func TestDefaultMetadataSchema(t *testing.T) { }, "SpecPreserved": { reason: "Other properties should be preserved", - args: args{in: 
&apiextensions.JSONSchemaProps{ - Type: string(schema.KnownJSONTypeObject), - Properties: map[string]apiextensions.JSONSchemaProps{ - "spec": { - Type: string(schema.KnownJSONTypeObject), - AdditionalProperties: &apiextensions.JSONSchemaPropsOrBool{ - Allows: true, - }}}}, + args: args{ + in: &apiextensions.JSONSchemaProps{ + Type: string(schema.KnownJSONTypeObject), + Properties: map[string]apiextensions.JSONSchemaProps{ + "spec": { + Type: string(schema.KnownJSONTypeObject), + AdditionalProperties: &apiextensions.JSONSchemaPropsOrBool{ + Allows: true, + }, + }, + }, + }, }, want: want{ out: &apiextensions.JSONSchemaProps{ @@ -79,7 +84,11 @@ func TestDefaultMetadataSchema(t *testing.T) { Type: string(schema.KnownJSONTypeObject), AdditionalProperties: &apiextensions.JSONSchemaPropsOrBool{ Allows: true, - }}}}}, + }, + }, + }, + }, + }, }, "MetadataNotOverwrite": { reason: "Other properties should not be overwritten in metadata if specified in the default", @@ -91,7 +100,11 @@ func TestDefaultMetadataSchema(t *testing.T) { Properties: map[string]apiextensions.JSONSchemaProps{ "name": { Type: string(schema.KnownJSONTypeBoolean), - }}}}}}, + }, + }, + }, + }, + }}, want: want{ out: func() *apiextensions.JSONSchemaProps { s := getDefaultSchema() @@ -106,16 +119,23 @@ func TestDefaultMetadataSchema(t *testing.T) { }, "MetadataPreserved": { reason: "Other properties should be preserved in if not specified in the default", - args: args{in: &apiextensions.JSONSchemaProps{ - Type: string(schema.KnownJSONTypeObject), - Properties: map[string]apiextensions.JSONSchemaProps{ - "metadata": { - Type: string(schema.KnownJSONTypeObject), - Properties: map[string]apiextensions.JSONSchemaProps{ - "annotations": { - Type: string(schema.KnownJSONTypeObject), - Properties: map[string]apiextensions.JSONSchemaProps{ - "foo": {Type: string(schema.KnownJSONTypeString)}}}}}}}, + args: args{ + in: &apiextensions.JSONSchemaProps{ + Type: string(schema.KnownJSONTypeObject), + Properties: 
map[string]apiextensions.JSONSchemaProps{ + "metadata": { + Type: string(schema.KnownJSONTypeObject), + Properties: map[string]apiextensions.JSONSchemaProps{ + "annotations": { + Type: string(schema.KnownJSONTypeObject), + Properties: map[string]apiextensions.JSONSchemaProps{ + "foo": {Type: string(schema.KnownJSONTypeString)}, + }, + }, + }, + }, + }, + }, }, want: want{ out: func() *apiextensions.JSONSchemaProps { diff --git a/pkg/validation/apiextensions/v1/composition/validator_test.go b/pkg/validation/apiextensions/v1/composition/validator_test.go index b8a09b693..84454e8cd 100644 --- a/pkg/validation/apiextensions/v1/composition/validator_test.go +++ b/pkg/validation/apiextensions/v1/composition/validator_test.go @@ -91,7 +91,7 @@ func TestValidatorValidate(t *testing.T) { }, }, "RejectStrictInvalidFromFieldPath": { - reason: "Should reject a Composition with a patch using a field not allowed by the the Composite resource, if all CRDs are found", + reason: "Should reject a Composition with a patch using a field not allowed by the Composite resource, if all CRDs are found", want: want{ errs: field.ErrorList{ { @@ -320,7 +320,8 @@ func TestValidatorValidate(t *testing.T) { Type: v1.PatchTypeFromCompositeFieldPath, FromFieldPath: ptr.To("spec.someField"), ToFieldPath: ptr.To("spec.someOtherField"), - }}}, + }}, + }, ), withPatches(0, v1.Patch{ Type: v1.PatchTypePatchSet, PatchSetName: ptr.To("some-patch-set"), @@ -360,7 +361,8 @@ func TestValidatorValidate(t *testing.T) { }, }, ToFieldPath: ptr.To("spec.someOtherField"), - }}}, + }}, + }, ), withPatches(0, v1.Patch{ Type: v1.PatchTypePatchSet, PatchSetName: ptr.To("some-patch-set"), @@ -660,8 +662,10 @@ func sortFieldErrors() cmp.Option { }) } -const testGroup = "resources.test.com" -const testGroupSingular = "resource.test.com" +const ( + testGroup = "resources.test.com" + testGroupSingular = "resource.test.com" +) func marshalJSON(t *testing.T, obj interface{}) []byte { t.Helper() @@ -710,7 +714,6 @@ func 
defaultGKToCRDs() map[schema.GroupKind]apiextensions.CustomResourceDefiniti crds := []apiextensions.CustomResourceDefinition{*defaultManagedCrdBuilder().build(), *defaultCompositeCrdBuilder().build()} m := make(map[schema.GroupKind]apiextensions.CustomResourceDefinition, len(crds)) for _, crd := range crds { - crd := crd m[schema.GroupKind{ Group: crd.Spec.Group, Kind: crd.Spec.Names.Kind, diff --git a/pkg/validation/internal/schema/schema_test.go b/pkg/validation/internal/schema/schema_test.go index 76c9e4bad..2c88b7544 100644 --- a/pkg/validation/internal/schema/schema_test.go +++ b/pkg/validation/internal/schema/schema_test.go @@ -223,19 +223,15 @@ func TestFromKnownJSONType(t *testing.T) { out: v1.TransformIOTypeBool, }, }, - "InvalidArray": { - reason: "Array should not be valid", + "ValidArray": { + reason: "Array should be valid and convert properly", args: args{t: KnownJSONTypeArray}, - want: want{ - err: xperrors.Errorf(errFmtUnsupportedJSONType, KnownJSONTypeArray), - }, + want: want{out: v1.TransformIOTypeObject}, }, - "InvalidObject": { - reason: "Object should not be valid", + "ValidObject": { + reason: "Object should be valid and convert properly", args: args{t: KnownJSONTypeObject}, - want: want{ - err: xperrors.Errorf(errFmtUnsupportedJSONType, KnownJSONTypeObject), - }, + want: want{out: v1.TransformIOTypeObject}, }, } for name, tc := range cases { diff --git a/test/e2e/README.md b/test/e2e/README.md index 334d418ab..8293338f9 100644 --- a/test/e2e/README.md +++ b/test/e2e/README.md @@ -17,72 +17,41 @@ All Crossplane features must be exercised by these tests, as well as unit tests. ## Running Tests -Run `make e2e` to run E2E tests. +Run `earthly -P +e2e` to run E2E tests. -This compiles Crossplane and an E2E test binary. It then runs the test binary. -Use the `E2E_TEST_FLAGS` to pass flags to the test binary. For example: +This compiles Crossplane and an E2E test binary. It then uses the test binary to +run the base test suite. 
Use the `FLAGS` to pass flags to the test binary. For +example: ```shell -# Most tests use t.Log to explain what they're doing. Use the -test.v flag -# (equivalent to go test -v) to see detailed test progress and logs. -E2E_TEST_FLAGS="-test.v" make e2e - # Some functions that setup the test environment (e.g. kind) use the klog logger # The -v flag controls the verbosity of klog. Use -v=4 for debug logging. -E2E_TEST_FLAGS="-test.v -v=4" make e2e +earthly -P +e2e --FLAGS="-v=4" # To run only a specific test, match it by regular expression -E2E_TEST_FLAGS="-test.run ^TestConfiguration" make e2e +earthly -P +e2e --FLAGS="-test.run ^TestConfiguration" # To test features with certain labels, use the labels flag -E2E_TEST_FLAGS="-labels area=apiextensions" make e2e +earthly -P +e2e --FLAGS="-labels area=apiextensions" # To test a specific feature, use the feature flag -E2E_TEST_FLAGS="-feature=ConfigurationWithDependency" make e2e +earthly -P +e2e --FLAGS="-feature=ConfigurationWithDependency" # Stop immediately on first test failure, and leave the kind cluster to debug. -E2E_TEST_FLAGS="-test.v -test.failfast -destroy-kind-cluster=false" - -# Use an existing Kubernetes cluster. Note that the E2E tests can't deploy your -# local build of Crossplane in this scenario, so you'll have to do it yourself. -E2E_TEST_FLAGS="-create-kind-cluster=false -destroy-kind-cluster=false -kubeconfig=$HOME/.kube/config" make e2e - -# Run the CrossplaneUpgrade feature, against an existing kind cluster named -# "kind" (or creating it if it doesn't exist), # without installing Crossplane -# first, as the feature expects the cluster to be empty, but still loading the -# images to it. Setting the tests to fail fast and not destroying the cluster -# afterward in order to allow debugging it. 
-E2E_TEST_FLAGS="-test.v -v 4 -test.failfast \ - -destroy-kind-cluster=false \ - -kind-cluster-name=kind \ - -preinstall-crossplane=false \ - -feature=CrossplaneUpgrade" make e2e - -# Run all the tests not installing or upgrading Crossplane against the currently -# selected cluster where Crossplane has already been installed. -E2E_TEST_FLAGS="-test.v -v 4 -test.failfast \ - -kubeconfig=$HOME/.kube/config \ - -skip-labels modify-crossplane-installation=true \ - -create-kind-cluster=false \ - -preinstall-crossplane=false" make go.build e2e-run-tests - -# Run the composition-webhook-schema-validation suite of tests, which will -# result in all tests marked as "test-suite=base" or -# "test-suite=composition-webhook-schema-validation" being run against a kind -# cluster with Crossplane installed with composition-webhook-schema-validation -# enabled -E2E_TEST_FLAGS="-test.v -v 4 -test.failfast \ - -test-suite=composition-webhook-schema-validation " make e2e +earthly -i -P +e2e --FLAGS="-test.failfast -destroy-kind-cluster=false" + +# Run a specific test suite. +earthly -P +e2e --FLAGS="-test.v -test-suite=composition-webhook-schema-validation" ``` ## Test Parallelism -`make e2e` runs all defined E2E tests serially. Tests do not run in parallel. -This is because all tests run against the same API server and Crossplane has a -lot of cluster-scoped state - XRDs, Providers, Compositions, etc. It's easier -and less error-prone to write tests when you don't have to worry about one test -potentially conflicting with another - for example by installing the same -provider another test would install. +`earthly -P +e2e` runs all defined E2E tests serially. Tests do not run in +parallel. This is because all tests run against the same API server and +Crossplane has a lot of cluster-scoped state - XRDs, Providers, Compositions, +etc. 
It's easier and less error-prone to write tests when you don't have to +worry about one test potentially conflicting with another - for example by +installing the same provider another test would install. The [CI GitHub workflow] uses a matrix strategy to run multiple jobs in parallel, each running a test suite, see the dedicated section for more details. @@ -146,7 +115,7 @@ We try to follow this pattern when adding a new test: `github.com/crossplane/crossplane/test/e2e/funcs`, or add new ones there if needed. 1. Prefer using the Fluent APIs to define features - (`features.New(...).WithSetup(...).Assess(...).WithTeardown(...).Feature()`). + (`features.NewWithDescription(...).WithSetup(...).AssessWithDescription(...).WithTeardown(...).Feature()`). 1. `features.Table` should be used only to define multiple self-contained assessments to be run sequentially, but without assuming any ordering among them, similarly to the usual table driven style we adopt for unit testing. @@ -155,8 +124,8 @@ We try to follow this pattern when adding a new test: a feature, as they allow to provide a description. 1. Use short but explicative `CamelCase` sentences as descriptions for everything used to define the name of tests/subtests, e.g. - `features.New("CrossplaneUpgrade", ...)` `WithSetup("InstallProviderNop", - ...)`, `Assess("ProviderNopIsInstalled", ...)`, + `features.NewWithDescription("CrossplaneUpgrade", ...)` `WithSetup("InstallProviderNop", + ...)`, `AssessWithDescription("ProviderNopIsInstalled", ...)`, `WithTeardown("UninstallProviderNop", ...)`. 1. Use the `Setup` and `Teardown` phases to define respectively actions that are not strictly part of the feature being tested, but are needed to make it @@ -194,29 +163,31 @@ Here an example of a test following the above guidelines: ```go package e2e +import "sigs.k8s.io/e2e-framework/pkg/features" + // ... // TestSomeFeature ... 
func TestSomeFeature(t *testing.T) { - manifests := "test/e2e/manifests/pkg/some-area/some-feature" - namespace := "some-namespace" - // ... other variables or constants ... - - environment.Test(t, - features.New(t.Name()). - WithLabel(LabelArea, ...). - WithLabel(LabelSize, ...). - WithLabel(config.LabelTestSuite, config.TestSuiteDefault). - // ... - WithSetup("ReadyPrerequisites", ... ). - // ... other setup steps ... - Assess("DoSomething", ... ). - Assess("SomethingElseIsInSomeState", ... ). - // ... other assess steps ... - WithTeardown("DeleteCreatedResources", ...). - // ... other teardown steps ... - Feature(), - ) + manifests := "test/e2e/manifests/pkg/some-area/some-feature" + namespace := "some-namespace" + // ... other variables or constants ... + + environment.Test(t, + features.NewWithDescription(t.Name(), ...). + WithLabel(LabelArea, ...). + WithLabel(LabelSize, ...). + WithLabel(config.LabelTestSuite, config.TestSuiteDefault). + // ... + WithSetup("ReadyPrerequisites", ...). + // ... other setup steps ... + AssessWithDescription("DoSomething", ...). + AssessWithDescription("SomethingElseIsInSomeState", ...). + // ... other assess steps ... + WithTeardown("DeleteCreatedResources", ...). + // ... other teardown steps ... + Feature(), + ) } // ... diff --git a/test/e2e/apiextensions_test.go b/test/e2e/apiextensions_test.go index 4d66b8561..b69d27196 100644 --- a/test/e2e/apiextensions_test.go +++ b/test/e2e/apiextensions_test.go @@ -21,6 +21,7 @@ import ( "time" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/e2e-framework/pkg/features" "sigs.k8s.io/e2e-framework/third_party/helm" @@ -54,17 +55,11 @@ func init() { // extensions (i.e. Composition, XRDs, etc). 
const LabelAreaAPIExtensions = "apiextensions" -var ( - nopList = composed.NewList(composed.FromReferenceToList(corev1.ObjectReference{ - APIVersion: "nop.crossplane.io/v1alpha1", - Kind: "NopResource", - })) -) +var nopList = composed.NewList(composed.FromReferenceToList(corev1.ObjectReference{ + APIVersion: "nop.crossplane.io/v1alpha1", + Kind: "NopResource", +})) -// TestCompositionMinimal tests Crossplane's Composition functionality, -// checking that a claim using a very minimal Composition (with no patches, -// transforms, or functions) will become available when its composed -// resources do. func TestCompositionMinimal(t *testing.T) { manifests := "test/e2e/manifests/apiextensions/composition/minimal" @@ -78,7 +73,7 @@ func TestCompositionMinimal(t *testing.T) { })) environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests Crossplane's Composition functionality, checking that a claim using a very minimal Composition (with no patches, transforms, or functions) will become available when its composed resources do."). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). WithLabel(config.LabelTestSuite, config.TestSuiteDefault). @@ -105,14 +100,51 @@ func TestCompositionMinimal(t *testing.T) { ) } -// TestCompositionPatchAndTransform tests Crossplane's Composition functionality, -// checking that a claim using patch-and-transform Composition will become -// available when its composed resources do, and have a field derived from -// the patch. 
+func TestCompositionInvalidComposed(t *testing.T) {
+	manifests := "test/e2e/manifests/apiextensions/composition/invalid-composed"
+
+	xrList := composed.NewList(composed.FromReferenceToList(corev1.ObjectReference{
+		APIVersion: "example.org/v1alpha1",
+		Kind:       "XParent",
+	}), composed.FromReferenceToList(corev1.ObjectReference{
+		APIVersion: "example.org/v1alpha1",
+		Kind:       "XChild",
+	}))
+
+	environment.Test(t,
+		features.NewWithDescription(t.Name(), "Tests Crossplane's Composition functionality, checking that an XR is still reconciled and keeps its annotations even when one of its composed resources is invalid, i.e. it didn't apply successfully.").
+			WithLabel(LabelArea, LabelAreaAPIExtensions).
+			WithLabel(LabelSize, LabelSizeSmall).
+			WithLabel(config.LabelTestSuite, config.TestSuiteDefault).
+			WithSetup("PrerequisitesAreCreated", funcs.AllOf(
+				funcs.ApplyResources(FieldManager, manifests, "setup/*.yaml"),
+				funcs.ResourcesCreatedWithin(30*time.Second, manifests, "setup/*.yaml"),
+				funcs.ResourcesHaveConditionWithin(1*time.Minute, manifests, "setup/definition.yaml", apiextensionsv1.WatchingComposite()),
+				funcs.ResourcesHaveConditionWithin(2*time.Minute, manifests, "setup/provider.yaml", pkgv1.Healthy(), pkgv1.Active()),
+			)).
+			Assess("CreateXR", funcs.AllOf(
+				funcs.ApplyResources(FieldManager, manifests, "xr.yaml"),
+				funcs.InBackground(funcs.LogResources(xrList)),
+				funcs.InBackground(funcs.LogResources(nopList)),
+				funcs.ResourcesCreatedWithin(30*time.Second, manifests, "xr.yaml"),
+			)).
+			Assess("XRStillAnnotated", funcs.AllOf(
+				// Check that the XR still has metadata.annotations set
+				funcs.ResourcesHaveFieldValueWithin(1*time.Minute, manifests, "xr.yaml", "metadata.annotations[exampleVal]", "foo"),
+			)).
+			WithTeardown("DeleteXR", funcs.AllOf(
+				funcs.DeleteResources(manifests, "xr.yaml"),
+				funcs.ResourcesDeletedWithin(2*time.Minute, manifests, "xr.yaml"),
+			)).
+			WithTeardown("DeletePrerequisites", funcs.ResourcesDeletedAfterListedAreGone(3*time.Minute, manifests, "setup/*.yaml", nopList)).
+ Feature(), + ) +} + func TestCompositionPatchAndTransform(t *testing.T) { manifests := "test/e2e/manifests/apiextensions/composition/patch-and-transform" environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests Crossplane's Composition functionality, checking that a claim using patch-and-transform Composition will become available when its composed resources do, and have a field derived from the patch."). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). WithLabel(config.LabelTestSuite, config.TestSuiteDefault). @@ -140,13 +172,10 @@ func TestCompositionPatchAndTransform(t *testing.T) { ) } -// TestCompositionRealtimeRevisionSelection tests Crossplane's Composition -// functionality to react in realtime to changes in a Composition by selecting -// the new CompositionRevision and reconcile the XRs. func TestCompositionRealtimeRevisionSelection(t *testing.T) { manifests := "test/e2e/manifests/apiextensions/composition/realtime-revision-selection" environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests Crossplane's Composition functionality to react in realtime to changes in a Composition by selecting the new CompositionRevision and reconcile the XRs."). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). WithLabel(config.LabelTestSuite, config.TestSuiteDefault). @@ -178,14 +207,10 @@ func TestCompositionRealtimeRevisionSelection(t *testing.T) { ) } -// TODO(negz): How do we want to handle beta features? They're on by default. -// Maybe in this case add a test suite that tests P&T when Functions are -// disabled? - func TestCompositionFunctions(t *testing.T) { manifests := "test/e2e/manifests/apiextensions/composition/functions" environment.Test(t, - features.New(t.Name()). 
+ features.NewWithDescription(t.Name(), "Tests the correct functioning of composition functions ensuring that the composed resources are created, conditions are met, fields are patched, and resources are properly cleaned up when deleted."). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). WithLabel(config.LabelTestSuite, config.TestSuiteDefault). @@ -216,7 +241,7 @@ func TestCompositionFunctions(t *testing.T) { func TestPropagateFieldsRemovalToXR(t *testing.T) { manifests := "test/e2e/manifests/apiextensions/composition/propagate-field-removals" environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests that field removals in a claim are correctly propagated to the associated composite resource (XR), ensuring that updates and deletions are properly synchronized, and that the status from the XR is accurately reflected back to the claim."). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). WithLabel(LabelModifyCrossplaneInstallation, LabelModifyCrossplaneInstallationTrue). @@ -270,7 +295,7 @@ func TestPropagateFieldsRemovalToXR(t *testing.T) { func TestPropagateFieldsRemovalToXRAfterUpgrade(t *testing.T) { manifests := "test/e2e/manifests/apiextensions/composition/propagate-field-removals" environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests that field removals in a composite resource (XR) are correctly propagated after upgrading the field managers from CSA to SSA, verifying that the upgrade process does not interfere with the synchronization of fields between the claim and the XR."). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). WithLabel(LabelModifyCrossplaneInstallation, LabelModifyCrossplaneInstallationTrue). 
@@ -300,7 +325,10 @@ func TestPropagateFieldsRemovalToXRAfterUpgrade(t *testing.T) { funcs.AsFeaturesFunc(environment.HelmUpgradeCrossplaneToSuite(SuiteSSAClaims)), funcs.ReadyToTestWithin(1*time.Minute, namespace), )). - Assess("UpdateClaim", funcs.ApplyClaim(FieldManager, manifests, "claim-update.yaml")). + Assess("UpdateClaim", funcs.AllOf( + funcs.ApplyClaim(FieldManager, manifests, "claim-update.yaml"), + funcs.ClaimUnderTestMustNotChangeWithin(1*time.Minute), + )). Assess("FieldsRemovalPropagatedToXR", funcs.AllOf( // Updates and deletes are propagated claim -> XR. funcs.CompositeResourceHasFieldValueWithin(1*time.Minute, manifests, "claim.yaml", "metadata.labels[foo]", "1"), @@ -332,10 +360,56 @@ func TestPropagateFieldsRemovalToXRAfterUpgrade(t *testing.T) { ) } +func TestPropagateFieldsRemovalToComposed(t *testing.T) { + manifests := "test/e2e/manifests/apiextensions/composition/propagate-field-removals" + environment.Test(t, + features.NewWithDescription(t.Name(), "Tests Crossplane's end-to-end SSA syncing functionality of clear propagation of fields from claim->XR->MR, when existing composition and resources are migrated from native P-and-T to functions pipeline mode."). + WithLabel(LabelArea, LabelAreaAPIExtensions). + WithLabel(LabelSize, LabelSizeSmall). + WithLabel(LabelModifyCrossplaneInstallation, LabelModifyCrossplaneInstallationTrue). + WithLabel(config.LabelTestSuite, SuiteSSAClaims). + WithSetup("EnableSSAClaims", funcs.AllOf( + funcs.AsFeaturesFunc(environment.HelmUpgradeCrossplaneToSuite(SuiteSSAClaims)), + funcs.ReadyToTestWithin(1*time.Minute, namespace), + )). + WithSetup("PrerequisitesAreCreated", funcs.AllOf( + funcs.ApplyResources(FieldManager, manifests, "setup/*.yaml"), + funcs.ResourcesCreatedWithin(30*time.Second, manifests, "setup/*.yaml"), + funcs.ResourcesHaveConditionWithin(1*time.Minute, manifests, "setup/definition.yaml", apiextensionsv1.WatchingComposite()), + )). 
+ Assess("CreateClaim", funcs.AllOf( + funcs.ApplyClaim(FieldManager, manifests, "claim.yaml"), + funcs.ResourcesCreatedWithin(30*time.Second, manifests, "claim.yaml"), + funcs.ResourcesHaveConditionWithin(5*time.Minute, manifests, "claim.yaml", xpv1.Available()), + )). + Assess("ConvertToPipelineCompositionUpgrade", funcs.ApplyResources(FieldManager, manifests, "composition-xfn.yaml")). + Assess("UpdateClaim", funcs.ApplyClaim(FieldManager, manifests, "claim-update.yaml")). + Assess("FieldsRemovalPropagatedToMR", funcs.AllOf( + // field removals and updates are propagated claim -> XR -> MR, after converting composition from native to pipeline mode + funcs.ComposedResourcesHaveFieldValueWithin(1*time.Minute, manifests, "claim.yaml", + "spec.forProvider.fields.tags[newtag]", funcs.NotFound, + funcs.FilterByGK(schema.GroupKind{Group: "nop.crossplane.io", Kind: "NopResource"})), + funcs.ComposedResourcesHaveFieldValueWithin(1*time.Minute, manifests, "claim.yaml", + "spec.forProvider.fields.tags[tag]", "v1", + funcs.FilterByGK(schema.GroupKind{Group: "nop.crossplane.io", Kind: "NopResource"})), + )). + WithTeardown("DeleteClaim", funcs.AllOf( + funcs.DeleteResources(manifests, "claim.yaml"), + funcs.ResourcesDeletedWithin(2*time.Minute, manifests, "claim.yaml"), + )). + WithTeardown("DeletePrerequisites", funcs.ResourcesDeletedAfterListedAreGone(3*time.Minute, manifests, "setup/*.yaml", nopList)). + WithTeardown("DisableSSAClaims", funcs.AllOf( + funcs.AsFeaturesFunc(environment.HelmUpgradeCrossplaneToBase()), // Disable our feature flag. + funcs.ReadyToTestWithin(1*time.Minute, namespace), + )). + Feature(), + ) +} + func TestCompositionSelection(t *testing.T) { manifests := "test/e2e/manifests/apiextensions/composition/composition-selection" environment.Test(t, - features.New(t.Name()). 
+ features.NewWithDescription(t.Name(), "Tests that label selectors in a claim are correctly propagated to the composite resource (XR), ensuring that the appropriate composition is selected and remains consistent even after updates to the label selectors."). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). WithLabel(LabelModifyCrossplaneInstallation, LabelModifyCrossplaneInstallationTrue). @@ -394,7 +468,7 @@ func TestCompositionSelection(t *testing.T) { func TestBindToExistingXR(t *testing.T) { manifests := "test/e2e/manifests/apiextensions/composition/bind-existing-xr" environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests that a new claim can successfully bind to an existing composite resource (XR), ensuring that the XR’s fields are updated according to the claim’s specifications and that the XR is correctly managed when the claim is deleted."). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). WithLabel(config.LabelTestSuite, config.TestSuiteDefault). diff --git a/test/e2e/comp_schema_validation_test.go b/test/e2e/comp_schema_validation_test.go index 96601fb06..48cbaa540 100644 --- a/test/e2e/comp_schema_validation_test.go +++ b/test/e2e/comp_schema_validation_test.go @@ -47,6 +47,11 @@ func TestCompositionValidation(t *testing.T) { funcs.ResourcesCreatedWithin(30*time.Second, manifests, "composition-warn-valid.yaml"), ), }, + { + // A composition that updates immutable fields should be rejected when validated in strict mode. + Name: "ImmutableCompositionFieldUpdateIsRejectedStrictMode", + Assessment: funcs.ResourcesFailToApply(FieldManager, manifests, "composition-invalid-immutable.yaml"), + }, } environment.Test(t, cases.Build(t.Name()). 
diff --git a/test/e2e/config/environment.go b/test/e2e/config/environment.go index b683650be..bc20a0d6b 100644 --- a/test/e2e/config/environment.go +++ b/test/e2e/config/environment.go @@ -103,7 +103,7 @@ func NewEnvironmentFromFlags() Environment { suites: map[string]testSuite{}, } c.kindClusterName = flag.String("kind-cluster-name", "", "name of the kind cluster to use") - c.kindLogsLocation = flag.String("kind-logs-location", "", "destination of the the kind cluster logs on failure") + c.kindLogsLocation = flag.String("kind-logs-location", "", "destination of the kind cluster logs on failure") c.createKindCluster = flag.Bool("create-kind-cluster", true, "create a kind cluster (and deploy Crossplane) before running tests, if the cluster does not already exist with the same name") c.destroyKindCluster = flag.Bool("destroy-kind-cluster", true, "destroy the kind cluster when tests complete") c.preinstallCrossplane = flag.Bool("preinstall-crossplane", true, "install Crossplane before running tests") @@ -189,7 +189,7 @@ func (e *Environment) HelmInstallBaseCrossplane() env.Func { } // getSuiteInstallOpts returns the helm install options for the specified -// suite, appending additional specified ones +// suite, appending additional specified ones. func (e *Environment) getSuiteInstallOpts(suite string, extra ...helm.Option) []helm.Option { p, ok := e.suites[suite] if !ok { @@ -238,7 +238,7 @@ func WithoutBaseDefaultTestSuite() TestSuiteOpt { } // WithLabelsToSelect sets the provided testSuite to include the provided -// labels, if not already specified by the user +// labels, if not already specified by the user. func WithLabelsToSelect(labels features.Labels) TestSuiteOpt { return func(suite *testSuite) { suite.labelsToSelect = labels diff --git a/test/e2e/consts.go b/test/e2e/consts.go index c29388ded..e49d91d6f 100644 --- a/test/e2e/consts.go +++ b/test/e2e/consts.go @@ -1,3 +1,20 @@ +/* +Copyright 2022 The Crossplane Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package e2e implements end-to-end tests for Crossplane. package e2e // LabelArea represents the 'area' of a feature. For example 'apiextensions', diff --git a/test/e2e/environmentconfig_test.go b/test/e2e/environmentconfig_test.go index 959ea9c28..d83832d8a 100644 --- a/test/e2e/environmentconfig_test.go +++ b/test/e2e/environmentconfig_test.go @@ -49,7 +49,7 @@ func TestEnvironmentConfigDefault(t *testing.T) { subfolder := "default" environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests that environment config defaults are correctly applied and annotated in managed resources."). WithLabel(LabelStage, LabelStageAlpha). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). @@ -101,7 +101,7 @@ func TestEnvironmentResolutionOptional(t *testing.T) { subfolder := "resolutionOptional" environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests that optional environment resolution works correctly, ensuring that managed resources are properly annotated with values derived from environment config."). WithLabel(LabelStage, LabelStageAlpha). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). @@ -153,7 +153,7 @@ func TestEnvironmentResolveIfNotPresent(t *testing.T) { subfolder := "resolveIfNotPresent" environment.Test(t, - features.New(t.Name()). 
+ features.NewWithDescription(t.Name(), "Tests that environment configurations are resolved and applied only if they are not already present, verifying that additional environment configurations do not override existing annotations."). WithLabel(LabelStage, LabelStageAlpha). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). @@ -215,7 +215,7 @@ func TestEnvironmentResolveAlways(t *testing.T) { subfolder := "resolveAlways" environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests that environment configurations are always resolved and applied to managed resources, ensuring that any changes in environment configurations are reflected in the annotations of the managed resources."). WithLabel(LabelStage, LabelStageAlpha). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). @@ -277,7 +277,7 @@ func TestEnvironmentConfigMultipleMaxMatchNil(t *testing.T) { subfolder := "multipleModeMaxMatchNil" environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests that when multiple environment configurations are available, the correct maximum match is selected and applied, ensuring that the managed resources are annotated correctly."). WithLabel(LabelStage, LabelStageAlpha). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). @@ -324,11 +324,12 @@ func TestEnvironmentConfigMultipleMaxMatchNil(t *testing.T) { Feature(), ) } + func TestEnvironmentConfigMultipleMaxMatch1(t *testing.T) { subfolder := "multipleModeMaxMatch1" environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests that when multiple environment configurations are available, the configuration with the highest match is correctly selected and applied to the managed resources."). WithLabel(LabelStage, LabelStageAlpha). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). 
diff --git a/test/e2e/funcs/collect.go b/test/e2e/funcs/collect.go index 2db730761..0bf265b15 100644 --- a/test/e2e/funcs/collect.go +++ b/test/e2e/funcs/collect.go @@ -18,8 +18,8 @@ package funcs import ( "context" - "fmt" "strings" + "testing" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" @@ -51,7 +51,9 @@ type coordinate struct { // // Note: this is a pretty expensive operation only suited for e2e tests with // small clusters. -func buildRelatedObjectGraph(ctx context.Context, discoveryClient discovery.DiscoveryInterface, client dynamic.Interface, mapper meta.RESTMapper) (map[coordinate][]coordinate, error) { +func buildRelatedObjectGraph(ctx context.Context, t *testing.T, discoveryClient discovery.DiscoveryInterface, client dynamic.Interface, mapper meta.RESTMapper) (map[coordinate][]coordinate, error) { + t.Helper() + // Discover all resource types resourceLists, err := discoveryClient.ServerPreferredResources() if err != nil { @@ -110,7 +112,7 @@ func buildRelatedObjectGraph(ctx context.Context, discoveryClient discovery.Disc group, version := parseAPIVersion(ref.APIVersion) rm, err := mapper.RESTMapping(schema.GroupKind{Group: group, Kind: ref.Kind}, version) if err != nil { - fmt.Printf("cannot find REST mapping for %v: %v\n", ref, err) + t.Logf("cannot find REST mapping for %v: %v\n", ref, err) continue } owner := coordinate{ @@ -141,7 +143,9 @@ func parseAPIVersion(apiVersion string) (group, version string) { // RelatedObjects returns all objects related to the supplied object through // ownership, i.e. the returned objects are transitively owned by obj, or // resource reference. 
-func RelatedObjects(ctx context.Context, config *rest.Config, objs ...client.Object) ([]client.Object, error) { +func RelatedObjects(ctx context.Context, t *testing.T, config *rest.Config, objs ...client.Object) ([]client.Object, error) { + t.Helper() + dynClient, err := dynamic.NewForConfig(config) if err != nil { return nil, err @@ -159,7 +163,7 @@ func RelatedObjects(ctx context.Context, config *rest.Config, objs ...client.Obj return nil, err } - ownershipGraph, err := buildRelatedObjectGraph(ctx, discoveryClient, dynClient, mapper) + ownershipGraph, err := buildRelatedObjectGraph(ctx, t, discoveryClient, dynClient, mapper) if err != nil { return nil, errors.Wrap(err, "cannot build ownership graph") } @@ -170,7 +174,7 @@ func RelatedObjects(ctx context.Context, config *rest.Config, objs ...client.Obj gvk := obj.GetObjectKind().GroupVersionKind() rm, err := mapper.RESTMapping(schema.GroupKind{Group: gvk.Group, Kind: gvk.Kind}, gvk.Version) if err != nil { - fmt.Printf("cannot find REST mapping for %s: %v\n", gvk, err) + t.Logf("cannot find REST mapping for %s: %v\n", gvk, err) continue } @@ -181,15 +185,17 @@ func RelatedObjects(ctx context.Context, config *rest.Config, objs ...client.Obj }, seen)...) 
} - return loadCoordinates(ctx, dynClient, coords), nil + return loadCoordinates(ctx, t, dynClient, coords), nil } -func loadCoordinates(ctx context.Context, dynClient dynamic.Interface, coords []coordinate) []client.Object { +func loadCoordinates(ctx context.Context, t *testing.T, dynClient dynamic.Interface, coords []coordinate) []client.Object { + t.Helper() + ret := make([]client.Object, 0, len(coords)) for _, coord := range coords { other, err := dynClient.Resource(coord.GroupVersionResource).Namespace(coord.Namespace).Get(ctx, coord.Name, metav1.GetOptions{}) if err != nil { - fmt.Printf("cannot get %v: %v\n", coord, err) + t.Logf("cannot get %v: %v\n", coord, err) continue } ret = append(ret, other) diff --git a/test/e2e/funcs/env.go b/test/e2e/funcs/env.go index 2f834ef8e..9fbe3c2b2 100644 --- a/test/e2e/funcs/env.go +++ b/test/e2e/funcs/env.go @@ -70,13 +70,14 @@ func HelmUpgrade(o ...helm.Option) env.Func { // returns an error the calling test is failed with t.Fatal(err). func AsFeaturesFunc(fn env.Func) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + ctx, err := fn(ctx, c) if err != nil { t.Fatal(err) } return ctx } - } // HelmUninstall uninstalls a Helm chart. @@ -104,6 +105,7 @@ func EnvFuncs(fns ...env.Func) env.Func { return func(ctx context.Context, c *envconf.Config) (context.Context, error) { for _, fn := range fns { var err error + //nolint:fatcontext // We want to pass the context down the chain. ctx, err = fn(ctx, c) if err != nil { return ctx, err @@ -115,11 +117,11 @@ func EnvFuncs(fns ...env.Func) env.Func { // CreateKindClusterWithConfig create kind cluster of the given name according to // configuration referred via configFilePath. -// The configuration is placed in test context afterward +// The configuration is placed in test context afterward. 
func CreateKindClusterWithConfig(clusterName, configFilePath string) env.Func { return EnvFuncs( envfuncs.CreateClusterWithConfig(kind.NewProvider(), clusterName, configFilePath), - func(ctx context.Context, config *envconf.Config) (context.Context, error) { + func(ctx context.Context, _ *envconf.Config) (context.Context, error) { b, err := os.ReadFile(filepath.Clean(configFilePath)) if err != nil { return ctx, err diff --git a/test/e2e/funcs/feature.go b/test/e2e/funcs/feature.go index f05a4b456..e0cb39704 100644 --- a/test/e2e/funcs/feature.go +++ b/test/e2e/funcs/feature.go @@ -66,11 +66,19 @@ const DefaultPollInterval = time.Millisecond * 500 type onSuccessHandler func(o k8s.Object) -// AllOf runs the supplied functions in order. +// AllOf runs the supplied functions in order. If a function fails the test and +// the environment is configured to fail fast (e2e-framework's -fail-fast flag) +// the remaining functions will not be run. func AllOf(fns ...features.Func) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + for _, fn := range fns { + //nolint:fatcontext // We want to pass the context to each function. ctx = fn(ctx, t, c) + if t.Failed() && c.FailFast() { + break + } } return ctx } @@ -79,6 +87,8 @@ func AllOf(fns ...features.Func) features.Func { // InBackground runs the supplied function in a goroutine. func InBackground(fn features.Func) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + go fn(ctx, t, c) return ctx } @@ -109,6 +119,8 @@ func ReadyToTestWithin(d time.Duration, namespace string) features.Func { // not Available within the supplied duration. 
func DeploymentBecomesAvailableWithin(d time.Duration, namespace, name string) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + dp := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}} t.Logf("Waiting %s for deployment %s/%s to become Available...", d, dp.GetNamespace(), dp.GetName()) start := time.Now() @@ -123,10 +135,11 @@ func DeploymentBecomesAvailableWithin(d time.Duration, namespace, name string) f // ResourcesCreatedWithin fails a test if the supplied resources are not found // to exist within the supplied duration. -func ResourcesCreatedWithin(d time.Duration, dir, pattern string) features.Func { +func ResourcesCreatedWithin(d time.Duration, dir, pattern string, options ...decoder.DecodeOption) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() - rs, err := decoder.DecodeAllFiles(ctx, os.DirFS(dir), pattern) + rs, err := decoder.DecodeAllFiles(ctx, os.DirFS(dir), pattern, options...) if err != nil { t.Error(err) return ctx @@ -154,10 +167,12 @@ func ResourcesCreatedWithin(d time.Duration, dir, pattern string) features.Func // exist within the supplied duration. 
func ResourceCreatedWithin(d time.Duration, o k8s.Object) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + t.Logf("Waiting %s for %s to be created...", d, identifier(o)) start := time.Now() - if err := wait.For(conditions.New(c.Client().Resources()).ResourceMatch(o, func(object k8s.Object) bool { return true }), wait.WithTimeout(d), wait.WithInterval(DefaultPollInterval)); err != nil { + if err := wait.For(conditions.New(c.Client().Resources()).ResourceMatch(o, func(_ k8s.Object) bool { return true }), wait.WithTimeout(d), wait.WithInterval(DefaultPollInterval)); err != nil { t.Errorf("resource %s did not exist: %v", identifier(o), err) return ctx } @@ -169,10 +184,11 @@ func ResourceCreatedWithin(d time.Duration, o k8s.Object) features.Func { // ResourcesDeletedWithin fails a test if the supplied resources are not deleted // within the supplied duration. -func ResourcesDeletedWithin(d time.Duration, dir, pattern string) features.Func { +func ResourcesDeletedWithin(d time.Duration, dir, pattern string, options ...decoder.DecodeOption) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() - rs, err := decoder.DecodeAllFiles(ctx, os.DirFS(dir), pattern) + rs, err := decoder.DecodeAllFiles(ctx, os.DirFS(dir), pattern, options...) if err != nil { t.Error(err) return ctx @@ -188,7 +204,7 @@ func ResourcesDeletedWithin(d time.Duration, dir, pattern string) features.Func start := time.Now() if err := wait.For(conditions.New(c.Client().Resources()).ResourcesDeleted(list), wait.WithTimeout(d), wait.WithInterval(DefaultPollInterval)); err != nil { objs := itemsToObjects(list.Items) - related, _ := RelatedObjects(ctx, c.Client().RESTConfig(), objs...) + related, _ := RelatedObjects(ctx, t, c.Client().RESTConfig(), objs...) 
events := valueOrError(eventString(ctx, c.Client().RESTConfig(), append(objs, related...)...)) t.Errorf("resources not deleted: %v:\n\n%s\n%s\nRelated objects:\n\n%s\n", err, toYAML(objs...), events, toYAML(related...)) @@ -204,6 +220,8 @@ func ResourcesDeletedWithin(d time.Duration, dir, pattern string) features.Func // within the supplied duration. func ResourceDeletedWithin(d time.Duration, o k8s.Object) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + t.Logf("Waiting %s for %s to be deleted...", d, identifier(o)) start := time.Now() @@ -222,6 +240,8 @@ func ResourceDeletedWithin(d time.Duration, o k8s.Object) features.Func { // Comparison of conditions is modulo messages. func ResourcesHaveConditionWithin(d time.Duration, dir, pattern string, cds ...xpv1.Condition) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + rs, err := decoder.DecodeAllFiles(ctx, os.DirFS(dir), pattern) if err != nil { t.Error(err) @@ -271,7 +291,7 @@ func ResourcesHaveConditionWithin(d time.Duration, dir, pattern string, cds ...x start := time.Now() if err := wait.For(conditions.New(c.Client().Resources()).ResourcesMatch(list, match), wait.WithTimeout(d), wait.WithInterval(DefaultPollInterval)); err != nil { objs := itemsToObjects(list.Items) - related, _ := RelatedObjects(ctx, c.Client().RESTConfig(), objs...) + related, _ := RelatedObjects(ctx, t, c.Client().RESTConfig(), objs...) events := valueOrError(eventString(ctx, c.Client().RESTConfig(), append(objs, related...)...)) t.Errorf("resources did not have desired conditions: %s: %v:\n\n%s\n%s\nRelated objects:\n\n%s\n", desired, err, toYAML(objs...), events, toYAML(related...)) @@ -308,15 +328,16 @@ func (nf notFound) String() string { return "NotFound" } // NotFound is a special 'want' value that indicates the supplied path should // not be found. 
-var NotFound = notFound{} +var NotFound = notFound{} //nolint:gochecknoglobals // We treat this as a constant. // ResourcesHaveFieldValueWithin fails a test if the supplied resources do not // have the supplied value at the supplied field path within the supplied // duration. The supplied 'want' value must cmp.Equal the actual value. -func ResourcesHaveFieldValueWithin(d time.Duration, dir, pattern, path string, want any) features.Func { +func ResourcesHaveFieldValueWithin(d time.Duration, dir, pattern, path string, want any, options ...decoder.DecodeOption) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() - rs, err := decoder.DecodeAllFiles(ctx, os.DirFS(dir), pattern) + rs, err := decoder.DecodeAllFiles(ctx, os.DirFS(dir), pattern, options...) if err != nil { t.Error(err) return ctx @@ -372,6 +393,8 @@ func ResourcesHaveFieldValueWithin(d time.Duration, dir, pattern, path string, w // duration. The supplied 'want' value must cmp.Equal the actual value. func ResourceHasFieldValueWithin(d time.Duration, o k8s.Object, path string, want any) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + t.Logf("Waiting %s for %s to have value %q at field path %s...", d, identifier(o), want, path) match := func(o k8s.Object) bool { @@ -411,6 +434,8 @@ func ResourceHasFieldValueWithin(d time.Duration, o k8s.Object, path string, wan // resource cannot be applied successfully. 
func ApplyResources(manager, dir, pattern string, options ...decoder.DecodeOption) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + dfs := os.DirFS(dir) files, _ := fs.Glob(dfs, pattern) @@ -432,9 +457,11 @@ func ApplyResources(manager, dir, pattern string, options ...decoder.DecodeOptio type claimCtxKey struct{} // ApplyClaim applies the claim stored in the given folder and file -// and stores it in the test context for later retrival if needed -func ApplyClaim(manager, dir, cm string) features.Func { +// and stores it in the test context for later retrieval if needed. +func ApplyClaim(manager, dir, cm string, options ...decoder.DecodeOption) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + dfs := os.DirFS(dir) files, _ := fs.Glob(dfs, cm) @@ -443,7 +470,7 @@ func ApplyClaim(manager, dir, cm string) features.Func { return ctx } - objs, err := decoder.DecodeAllFiles(ctx, dfs, cm) + objs, err := decoder.DecodeAllFiles(ctx, dfs, cm, options...) if err != nil { t.Error(err) return ctx @@ -469,15 +496,7 @@ func ApplyClaim(manager, dir, cm string) features.Func { // SetAnnotationMutateOption returns a DecodeOption that sets the supplied // annotation on the decoded object. func SetAnnotationMutateOption(key, value string) decoder.DecodeOption { - return decoder.MutateOption(func(o k8s.Object) error { - a := o.GetAnnotations() - if a == nil { - a = map[string]string{} - } - a[key] = value - o.SetAnnotations(a) - return nil - }) + return decoder.MutateAnnotations(map[string]string{key: value}) } // ResourcesFailToApply applies all manifests under the supplied directory that @@ -485,11 +504,13 @@ func SetAnnotationMutateOption(key, value string) decoder.DecodeOption { // fields are managed by the supplied field manager. 
It fails the test if any // supplied resource _can_ be applied successfully - use it to test that the API // server should reject a resource. -func ResourcesFailToApply(manager, dir, pattern string) features.Func { +func ResourcesFailToApply(manager, dir, pattern string, options ...decoder.DecodeOption) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + dfs := os.DirFS(dir) - if err := decoder.DecodeEachFile(ctx, dfs, pattern, ApplyHandler(c.Client().Resources(), manager)); err == nil { + if err := decoder.DecodeEachFile(ctx, dfs, pattern, ApplyHandler(c.Client().Resources(), manager), options...); err == nil { // TODO(negz): Ideally we'd say which one. t.Error("Resource applied successfully, but should have failed") return ctx @@ -525,11 +546,13 @@ func ApplyHandler(r *resources.Resources, manager string, osh ...onSuccessHandle // DeleteResources deletes (from the environment) all resources defined by the // manifests under the supplied directory that match the supplied glob pattern // (e.g. *.yaml). -func DeleteResources(dir, pattern string) features.Func { +func DeleteResources(dir, pattern string, options ...decoder.DecodeOption) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + dfs := os.DirFS(dir) - if err := decoder.DecodeEachFile(ctx, dfs, pattern, decoder.DeleteHandler(c.Client().Resources())); err != nil { + if err := decoder.DecodeEachFile(ctx, dfs, pattern, decoder.DeleteHandler(c.Client().Resources()), options...); err != nil { t.Fatal(err) return ctx } @@ -541,9 +564,11 @@ func DeleteResources(dir, pattern string) features.Func { } // ClaimUnderTestMustNotChangeWithin asserts that the claim available in -// the test context does not change within the given time +// the test context does not change within the given time. 
func ClaimUnderTestMustNotChangeWithin(d time.Duration) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + cm, ok := ctx.Value(claimCtxKey{}).(*claim.Unstructured) if !ok { t.Fatalf("claim not available in the context") @@ -574,9 +599,11 @@ func ClaimUnderTestMustNotChangeWithin(d time.Duration) features.Func { } // CompositeUnderTestMustNotChangeWithin asserts that the claim available in -// the test context does not change within the given time +// the test context does not change within the given time. func CompositeUnderTestMustNotChangeWithin(d time.Duration) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + cm, ok := ctx.Value(claimCtxKey{}).(*claim.Unstructured) if !ok { t.Fatalf("claim not available in the context") @@ -619,12 +646,14 @@ func CompositeUnderTestMustNotChangeWithin(d time.Duration) features.Func { } // CompositeResourceMustMatchWithin assert that a composite referred by the given file -// must be matched by the given function within the given timeout -func CompositeResourceMustMatchWithin(d time.Duration, dir, claimFile string, match func(xr *composite.Unstructured) bool) features.Func { +// must be matched by the given function within the given timeout. 
+func CompositeResourceMustMatchWithin(d time.Duration, dir, claimFile string, match func(xr *composite.Unstructured) bool, options ...decoder.DecodeOption) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + cm := &claim.Unstructured{} - if err := decoder.DecodeFile(os.DirFS(dir), claimFile, cm); err != nil { + if err := decoder.DecodeFile(os.DirFS(dir), claimFile, cm, options...); err != nil { t.Error(err) return ctx } @@ -670,11 +699,13 @@ func CompositeResourceMustMatchWithin(d time.Duration, dir, claimFile string, ma // CompositeResourceHasFieldValueWithin asserts that the XR referred to by the // claim in the given file has the specified value at the specified path within // the specified time. -func CompositeResourceHasFieldValueWithin(d time.Duration, dir, claimFile, path string, want any) features.Func { +func CompositeResourceHasFieldValueWithin(d time.Duration, dir, claimFile, path string, want any, options ...decoder.DecodeOption) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + cm := &claim.Unstructured{} - if err := decoder.DecodeFile(os.DirFS(dir), claimFile, cm); err != nil { + if err := decoder.DecodeFile(os.DirFS(dir), claimFile, cm, options...); err != nil { t.Error(err) return ctx } @@ -739,10 +770,12 @@ func CompositeResourceHasFieldValueWithin(d time.Duration, dir, claimFile, path // ComposedResourcesHaveFieldValueWithin fails a test if the composed // resources created by the claim does not have the supplied value at the // supplied path within the supplied duration. -func ComposedResourcesHaveFieldValueWithin(d time.Duration, dir, file, path string, want any, filter func(object k8s.Object) bool) features.Func { //nolint:gocyclo // Not too much over. 
+func ComposedResourcesHaveFieldValueWithin(d time.Duration, dir, file, path string, want any, filter func(object k8s.Object) bool, options ...decoder.DecodeOption) features.Func { //nolint:gocognit // Not too much over. return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + cm := &claim.Unstructured{} - if err := decoder.DecodeFile(os.DirFS(dir), file, cm); err != nil { + if err := decoder.DecodeFile(os.DirFS(dir), file, cm, options...); err != nil { t.Error(err) return ctx } @@ -830,6 +863,8 @@ func ComposedResourcesHaveFieldValueWithin(d time.Duration, dir, file, path stri // validation function within the supplied duration. func ListedResourcesValidatedWithin(d time.Duration, list k8s.ObjectList, min int, validate func(object k8s.Object) bool, listOptions ...resources.ListOption) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + if err := wait.For(conditions.New(c.Client().Resources()).ResourceListMatchN(list, min, validate, listOptions...), wait.WithTimeout(d), wait.WithInterval(DefaultPollInterval)); err != nil { y, _ := yaml.Marshal(list) t.Errorf("resources didn't pass validation: %v:\n\n%s\n\n", err, y) @@ -845,6 +880,8 @@ func ListedResourcesValidatedWithin(d time.Duration, list k8s.ObjectList, min in // is not deleted within the supplied duration. func ListedResourcesDeletedWithin(d time.Duration, list k8s.ObjectList, listOptions ...resources.ListOption) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + if err := c.Client().Resources().List(ctx, list, listOptions...); err != nil { return ctx } @@ -864,6 +901,8 @@ func ListedResourcesDeletedWithin(d time.Duration, list k8s.ObjectList, listOpti // not modified within the supplied duration. 
func ListedResourcesModifiedWith(list k8s.ObjectList, min int, modify func(object k8s.Object), listOptions ...resources.ListOption) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + if err := c.Client().Resources().List(ctx, list, listOptions...); err != nil { return ctx } @@ -897,8 +936,10 @@ func ListedResourcesModifiedWith(list k8s.ObjectList, min int, modify func(objec // LogResources polls the given kind of resources and logs creations, deletions // and changed conditions. -func LogResources(list k8s.ObjectList, listOptions ...resources.ListOption) features.Func { //nolint:gocyclo // this is a test helper +func LogResources(list k8s.ObjectList, listOptions ...resources.ListOption) features.Func { //nolint:gocognit // this is a test helper return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + prev := map[string]map[xpv1.ConditionType]xpv1.Condition{} pollCtx, cancel := context.WithCancel(ctx) @@ -965,11 +1006,13 @@ func LogResources(list k8s.ObjectList, listOptions ...resources.ListOption) feat // defined by the manifests under the supplied directory that match the supplied // glob pattern (e.g. *.yaml) and verifies that they are blocked by the usage // webhook. -func DeletionBlockedByUsageWebhook(dir, pattern string) features.Func { +func DeletionBlockedByUsageWebhook(dir, pattern string, options ...decoder.DecodeOption) features.Func { return func(ctx context.Context, t *testing.T, c *envconf.Config) context.Context { + t.Helper() + dfs := os.DirFS(dir) - err := decoder.DecodeEachFile(ctx, dfs, pattern, decoder.DeleteHandler(c.Client().Resources())) + err := decoder.DecodeEachFile(ctx, dfs, pattern, decoder.DeleteHandler(c.Client().Resources()), options...) 
if err == nil { t.Fatal("expected the usage webhook to deny the request but deletion succeeded") return ctx @@ -1105,7 +1148,6 @@ func valueOrError(s string, err error) string { func itemsToObjects(items []unstructured.Unstructured) []client.Object { objects := make([]client.Object, len(items)) for i, item := range items { - item := item // unalias loop variable objects[i] = &item } return objects diff --git a/test/e2e/install_test.go b/test/e2e/install_test.go index 64e449cef..6b9d3844d 100644 --- a/test/e2e/install_test.go +++ b/test/e2e/install_test.go @@ -40,13 +40,6 @@ const LabelAreaLifecycle = "lifecycle" const TestSuiteLifecycle = "lifecycle" -// TestCrossplaneLifecycle tests two features expecting them to be run in order: -// - CrossplaneUninstall: Test that it's possible to cleanly uninstall Crossplane, even -// after having created and deleted a claim. -// - CrossplaneUpgrade: Test that it's possible to upgrade Crossplane from the most recent -// stable Helm chart to the one we're testing, even when a claim exists. This -// expects Crossplane not to be installed. -// // Note: First time Installation is tested as part of the environment setup, // if not disabled explicitly. func TestCrossplaneLifecycle(t *testing.T) { @@ -54,7 +47,7 @@ func TestCrossplaneLifecycle(t *testing.T) { environment.Test(t, // Test that it's possible to cleanly uninstall Crossplane, even after // having created and deleted a claim. - features.New(t.Name()+"Uninstall"). + features.NewWithDescription(t.Name()+"Uninstall", "Test that it's possible to cleanly uninstall Crossplane, even after having created and deleted a claim."). WithLabel(LabelArea, LabelAreaLifecycle). WithLabel(LabelSize, LabelSizeSmall). WithLabel(LabelModifyCrossplaneInstallation, LabelModifyCrossplaneInstallationTrue). @@ -97,7 +90,7 @@ func TestCrossplaneLifecycle(t *testing.T) { funcs.ResourceDeletedWithin(3*time.Minute, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}), )). 
Feature(), - features.New(t.Name()+"Upgrade"). + features.NewWithDescription(t.Name()+"Upgrade", "Test that it's possible to upgrade Crossplane from the most recent stable Helm chart to the one we're testing, even when a claim exists. This expects Crossplane not to be installed."). WithLabel(LabelArea, LabelAreaLifecycle). WithLabel(LabelSize, LabelSizeSmall). WithLabel(config.LabelTestSuite, config.TestSuiteDefault). diff --git a/test/e2e/main_test.go b/test/e2e/main_test.go index 47803c775..605d3c58c 100644 --- a/test/e2e/main_test.go +++ b/test/e2e/main_test.go @@ -37,29 +37,27 @@ import ( "github.com/crossplane/crossplane/test/e2e/funcs" ) -// TODO(phisco): make it configurable +// TODO(phisco): make it configurable. const namespace = "crossplane-system" -// TODO(phisco): make it configurable +// TODO(phisco): make it configurable. const crdsDir = "cluster/crds" // The caller (e.g. make e2e) must ensure these exist. -// Run `make build e2e-tag-images` to produce them +// Run `make build e2e-tag-images` to produce them. const ( - // TODO(phisco): make it configurable + // TODO(phisco): make it configurable. imgcore = "crossplane-e2e/crossplane:latest" ) const ( - // TODO(phisco): make it configurable + // TODO(phisco): make it configurable. helmChartDir = "cluster/charts/crossplane" - // TODO(phisco): make it configurable + // TODO(phisco): make it configurable. helmReleaseName = "crossplane" ) -var ( - environment = config.NewEnvironmentFromFlags() -) +var environment = config.NewEnvironmentFromFlags() func TestMain(m *testing.M) { // TODO(negz): Global loggers are dumb and klog is dumb. Remove this when @@ -147,7 +145,10 @@ func TestMain(m *testing.M) { } // Check that all features are specifying a suite they belong to via LabelTestSuite. + //nolint:thelper // We can't make testing.T the second argument because we want to satisfy types.FeatureEnvFunc. 
environment.BeforeEachFeature(func(ctx context.Context, _ *envconf.Config, t *testing.T, feature features.Feature) (context.Context, error) { + t.Helper() + if _, exists := feature.Labels()[config.LabelTestSuite]; !exists { t.Fatalf("Feature %q does not have the required %q label set", feature.Name(), config.LabelTestSuite) } diff --git a/test/e2e/manifests/apiextensions/composition/functions/setup/composition.yaml b/test/e2e/manifests/apiextensions/composition/functions/setup/composition.yaml index 1ccd44a9e..0a45449ae 100644 --- a/test/e2e/manifests/apiextensions/composition/functions/setup/composition.yaml +++ b/test/e2e/manifests/apiextensions/composition/functions/setup/composition.yaml @@ -36,6 +36,12 @@ spec: results: - severity: SEVERITY_NORMAL message: "I am doing a compose!" + credentials: + - name: important-secret + source: Secret + secretRef: + namespace: crossplane-system + name: super-secret - step: detect-readiness functionRef: name: function-auto-ready diff --git a/test/e2e/manifests/apiextensions/composition/functions/setup/secret.yaml b/test/e2e/manifests/apiextensions/composition/functions/setup/secret.yaml new file mode 100644 index 000000000..22e00c9ac --- /dev/null +++ b/test/e2e/manifests/apiextensions/composition/functions/setup/secret.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + namespace: crossplane-system + name: super-secret +# We don't actually use the data, we just want to make sure we can load the +# secret. 
+data: {} \ No newline at end of file diff --git a/test/e2e/manifests/apiextensions/composition/invalid-composed/setup/composition.yaml b/test/e2e/manifests/apiextensions/composition/invalid-composed/setup/composition.yaml new file mode 100644 index 000000000..6c9c38716 --- /dev/null +++ b/test/e2e/manifests/apiextensions/composition/invalid-composed/setup/composition.yaml @@ -0,0 +1,75 @@ +--- +apiVersion: apiextensions.crossplane.io/v1 +kind: Composition +metadata: + name: parent +spec: + compositeTypeRef: + apiVersion: example.org/v1alpha1 + kind: XParent + resources: + - name: child + base: + apiVersion: example.org/v1alpha1 + kind: XChild + spec: {} + patches: + - type: FromCompositeFieldPath + # this is going to be 1 + fromFieldPath: spec.someField + # this will fail because it's supposed to be > 1 + toFieldPath: spec.someField + - name: nop-resource-1 + base: + apiVersion: nop.crossplane.io/v1alpha1 + kind: NopResource + metadata: + annotations: + exampleVal: "foo" + spec: + forProvider: + conditionAfter: + - conditionType: Ready + conditionStatus: "False" + time: 0s + - conditionType: Ready + conditionStatus: "True" + time: 1s + patches: + - type: FromCompositeFieldPath + fromFieldPath: metadata.name + # we should still see this in the child + toFieldPath: metadata.annotations[something] + - type: ToCompositeFieldPath + fromFieldPath: metadata.annotations[exampleVal] + # we should still see this in the composite + toFieldPath: metadata.annotations[exampleVal] +--- +apiVersion: apiextensions.crossplane.io/v1 +kind: Composition +metadata: + name: child +spec: + compositeTypeRef: + apiVersion: example.org/v1alpha1 + kind: XChild + resources: + # we don't really care about what happens here, it's not going to work + # because the composite resource will be invalid + - name: nop-resource-1 + base: + apiVersion: nop.crossplane.io/v1alpha1 + kind: NopResource + spec: + forProvider: + conditionAfter: + - conditionType: Ready + conditionStatus: "False" + time: 0s + 
- conditionType: Ready + conditionStatus: "True" + time: 1s + patches: + - type: FromCompositeFieldPath + fromFieldPath: metadata.name + toFieldPath: metadata.annotations[something] diff --git a/test/e2e/manifests/apiextensions/composition/invalid-composed/setup/definition.yaml b/test/e2e/manifests/apiextensions/composition/invalid-composed/setup/definition.yaml new file mode 100644 index 000000000..192c76708 --- /dev/null +++ b/test/e2e/manifests/apiextensions/composition/invalid-composed/setup/definition.yaml @@ -0,0 +1,51 @@ +apiVersion: apiextensions.crossplane.io/v1 +kind: CompositeResourceDefinition +metadata: + name: xparents.example.org +spec: + defaultCompositionRef: + name: parent + group: example.org + names: + kind: XParent + plural: xparents + versions: + - name: v1alpha1 + served: true + referenceable: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + someField: + # no limits on its value + type: integer +--- +apiVersion: apiextensions.crossplane.io/v1 +kind: CompositeResourceDefinition +metadata: + name: xchildren.example.org +spec: + defaultCompositionRef: + name: child + group: example.org + names: + kind: XChild + plural: xchildren + versions: + - name: v1alpha1 + served: true + referenceable: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + someField: + minimum: 2 + type: integer diff --git a/test/e2e/manifests/apiextensions/composition/invalid-composed/setup/provider.yaml b/test/e2e/manifests/apiextensions/composition/invalid-composed/setup/provider.yaml new file mode 100644 index 000000000..b82f2c560 --- /dev/null +++ b/test/e2e/manifests/apiextensions/composition/invalid-composed/setup/provider.yaml @@ -0,0 +1,7 @@ +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: provider-nop +spec: + package: xpkg.upbound.io/crossplane-contrib/provider-nop:v0.2.1 + ignoreCrossplaneConstraints: true diff --git 
a/test/e2e/manifests/apiextensions/composition/invalid-composed/xr.yaml b/test/e2e/manifests/apiextensions/composition/invalid-composed/xr.yaml new file mode 100644 index 000000000..a233b2916 --- /dev/null +++ b/test/e2e/manifests/apiextensions/composition/invalid-composed/xr.yaml @@ -0,0 +1,11 @@ +--- +apiVersion: example.org/v1alpha1 +kind: XParent +metadata: + name: test +# Expected: +# annotations: +# exampleVal: "foo" +spec: + # this should be > 1 in the XChild composed resource, so it will fail applying it + someField: 1 diff --git a/test/e2e/manifests/apiextensions/composition/propagate-field-removals/composition-xfn.yaml b/test/e2e/manifests/apiextensions/composition/propagate-field-removals/composition-xfn.yaml new file mode 100644 index 000000000..70646fe58 --- /dev/null +++ b/test/e2e/manifests/apiextensions/composition/propagate-field-removals/composition-xfn.yaml @@ -0,0 +1,39 @@ +apiVersion: apiextensions.crossplane.io/v1 +kind: Composition +metadata: + name: xnopresources.nop.example.org +spec: + compositeTypeRef: + apiVersion: nop.example.org/v1alpha1 + kind: XNopResource + mode: Pipeline + pipeline: + - functionRef: + name: function-patch-and-transform + input: + apiVersion: pt.fn.crossplane.io/v1beta1 + kind: Resources + resources: + - base: + apiVersion: nop.crossplane.io/v1alpha1 + kind: NopResource + spec: + forProvider: + conditionAfter: + - conditionStatus: "True" + conditionType: Ready + time: 0s + fields: + tags: {} + name: nop-resource-1 + patches: + - fromFieldPath: spec.coolField + toFieldPath: metadata.annotations["cf"] + type: FromCompositeFieldPath + - fromFieldPath: metadata.annotations["cf"] + toFieldPath: status.coolerField + type: ToCompositeFieldPath + - fromFieldPath: spec.parameters.tags + toFieldPath: spec.forProvider.fields.tags + type: FromCompositeFieldPath + step: patch-and-transform \ No newline at end of file diff --git a/test/e2e/manifests/apiextensions/composition/propagate-field-removals/setup/composition.yaml 
b/test/e2e/manifests/apiextensions/composition/propagate-field-removals/setup/composition.yaml index e8d925799..8c407842a 100644 --- a/test/e2e/manifests/apiextensions/composition/propagate-field-removals/setup/composition.yaml +++ b/test/e2e/manifests/apiextensions/composition/propagate-field-removals/setup/composition.yaml @@ -17,6 +17,8 @@ spec: - conditionType: Ready conditionStatus: "True" time: 0s + fields: + tags: {} patches: - type: FromCompositeFieldPath fromFieldPath: spec.coolField @@ -24,3 +26,6 @@ spec: - type: ToCompositeFieldPath fromFieldPath: metadata.annotations["cf"] toFieldPath: status.coolerField + - fromFieldPath: spec.parameters.tags + toFieldPath: spec.forProvider.fields.tags + type: FromCompositeFieldPath \ No newline at end of file diff --git a/test/e2e/manifests/apiextensions/composition/propagate-field-removals/setup/functions.yaml b/test/e2e/manifests/apiextensions/composition/propagate-field-removals/setup/functions.yaml new file mode 100644 index 000000000..1c5f98aa7 --- /dev/null +++ b/test/e2e/manifests/apiextensions/composition/propagate-field-removals/setup/functions.yaml @@ -0,0 +1,6 @@ +apiVersion: pkg.crossplane.io/v1beta1 +kind: Function +metadata: + name: function-patch-and-transform +spec: + package: xpkg.upbound.io/crossplane-contrib/function-patch-and-transform:v0.5.0 \ No newline at end of file diff --git a/test/e2e/manifests/apiextensions/composition/validation/composition-invalid-immutable.yaml b/test/e2e/manifests/apiextensions/composition/validation/composition-invalid-immutable.yaml new file mode 100644 index 000000000..1b0e46ffb --- /dev/null +++ b/test/e2e/manifests/apiextensions/composition/validation/composition-invalid-immutable.yaml @@ -0,0 +1,36 @@ +apiVersion: apiextensions.crossplane.io/v1 +kind: Composition +metadata: + name: valid + annotations: + crossplane.io/composition-schema-aware-validation-mode: strict +spec: + compositeTypeRef: + apiVersion: nop.example.org/v1alpha1 + kind: NopResource # <-- 
invalid, field is immutable + resources: + - name: nop-resource-1 + base: + apiVersion: nop.crossplane.io/v1alpha1 + kind: NopResource + spec: + forProvider: + conditionAfter: + - conditionType: Ready + conditionStatus: "False" + time: 0s + - conditionType: Ready + conditionStatus: "True" + time: 1s + patches: + - type: FromCompositeFieldPath + fromFieldPath: spec.coolField + toFieldPath: metadata.annotations[cool-field] + transforms: + - type: string + string: + type: Convert + convert: ToUpper + - type: ToCompositeFieldPath + fromFieldPath: metadata.annotations[cool-field] + toFieldPath: status.coolerField \ No newline at end of file diff --git a/test/e2e/manifests/apiextensions/usage/standalone/with-by/usage.yaml b/test/e2e/manifests/apiextensions/usage/standalone/with-by/usage.yaml index 6c54d3b9b..e1cd38bcc 100644 --- a/test/e2e/manifests/apiextensions/usage/standalone/with-by/usage.yaml +++ b/test/e2e/manifests/apiextensions/usage/standalone/with-by/usage.yaml @@ -3,6 +3,7 @@ kind: Usage metadata: name: using-uses-used spec: + replayDeletion: true of: apiVersion: nop.crossplane.io/v1alpha1 kind: NopResource diff --git a/test/e2e/manifests/apiextensions/usage/standalone/with-by/used.yaml b/test/e2e/manifests/apiextensions/usage/standalone/with-by/used.yaml index da0ec9f17..3fa17b096 100644 --- a/test/e2e/manifests/apiextensions/usage/standalone/with-by/used.yaml +++ b/test/e2e/manifests/apiextensions/usage/standalone/with-by/used.yaml @@ -7,9 +7,6 @@ metadata: spec: forProvider: conditionAfter: - - conditionType: "Synced" - conditionStatus: "True" - time: "5s" - conditionType: "Ready" conditionStatus: "True" time: "10s" \ No newline at end of file diff --git a/test/e2e/manifests/apiextensions/usage/standalone/with-by/using.yaml b/test/e2e/manifests/apiextensions/usage/standalone/with-by/using.yaml index b182b1d5a..62a6edcac 100644 --- a/test/e2e/manifests/apiextensions/usage/standalone/with-by/using.yaml +++ 
b/test/e2e/manifests/apiextensions/usage/standalone/with-by/using.yaml @@ -5,9 +5,6 @@ metadata: spec: forProvider: conditionAfter: - - conditionType: "Synced" - conditionStatus: "True" - time: "5s" - conditionType: "Ready" conditionStatus: "True" time: "10s" \ No newline at end of file diff --git a/test/e2e/manifests/apiextensions/usage/standalone/with-reason/used.yaml b/test/e2e/manifests/apiextensions/usage/standalone/with-reason/used.yaml index ace8b8701..af0060395 100644 --- a/test/e2e/manifests/apiextensions/usage/standalone/with-reason/used.yaml +++ b/test/e2e/manifests/apiextensions/usage/standalone/with-reason/used.yaml @@ -7,9 +7,6 @@ metadata: spec: forProvider: conditionAfter: - - conditionType: "Synced" - conditionStatus: "True" - time: "5s" - conditionType: "Ready" conditionStatus: "True" time: "10s" \ No newline at end of file diff --git a/test/e2e/manifests/apiextensions/xrd/validation/xrd-immutable-updated.yaml b/test/e2e/manifests/apiextensions/xrd/validation/xrd-immutable-updated.yaml new file mode 100644 index 000000000..cdaab70dd --- /dev/null +++ b/test/e2e/manifests/apiextensions/xrd/validation/xrd-immutable-updated.yaml @@ -0,0 +1,31 @@ +apiVersion: apiextensions.crossplane.io/v1 +kind: CompositeResourceDefinition +metadata: + name: xnopresources.nop.example.org +spec: + group: nope.example.org # <-- invalid, field is immutable + names: + kind: XNopResource + plural: xnopresources + claimNames: + kind: NopResource + plural: nopresources + connectionSecretKeys: + - test + versions: + - name: v1alpha1 + served: true + referenceable: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + coolField: + type: string + coolerField: + type: string + required: + - coolField \ No newline at end of file diff --git a/test/e2e/manifests/pkg/configuration/private/pull-secret.yaml b/test/e2e/manifests/pkg/configuration/private/pull-secret.yaml index c647aba4e..2494100d9 100644 --- 
a/test/e2e/manifests/pkg/configuration/private/pull-secret.yaml +++ b/test/e2e/manifests/pkg/configuration/private/pull-secret.yaml @@ -6,7 +6,7 @@ # --docker-username=_json_key \ # --docker-password="$(cat service-account-key.json)" # -# We're okay with having these credentials checked in in plaintext because the +# We're okay with having these credentials checked in, in plaintext, because the # service account they grant access to only has permission to pull from the # xp-install-test Google Artifact Registry, which contains only the empty test # Configuration defined by crossplane.yaml. diff --git a/test/e2e/pkg_test.go b/test/e2e/pkg_test.go index 9355b0a72..34dc6daa4 100644 --- a/test/e2e/pkg_test.go +++ b/test/e2e/pkg_test.go @@ -37,13 +37,11 @@ import ( // Providers, Configurations, etc). const LabelAreaPkg = "pkg" -// TestConfigurationPullFromPrivateRegistry tests that a Configuration can be -// installed from a private registry using a package pull secret. func TestConfigurationPullFromPrivateRegistry(t *testing.T) { manifests := "test/e2e/manifests/pkg/configuration/private" environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests that a Configuration can be installed from a private registry using a package pull secret."). WithLabel(LabelArea, LabelAreaPkg). WithLabel(LabelSize, LabelSizeSmall). WithLabel(config.LabelTestSuite, config.TestSuiteDefault). @@ -59,13 +57,11 @@ func TestConfigurationPullFromPrivateRegistry(t *testing.T) { ) } -// TestConfigurationWithDependency tests that a Configuration with a dependency -// on a Provider will become healthy when the Provider becomes healthy. func TestConfigurationWithDependency(t *testing.T) { manifests := "test/e2e/manifests/pkg/configuration/dependency" environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests that a Configuration with a dependency on a Provider will become healthy when the Provider becomes healthy."). 
WithLabel(LabelArea, LabelAreaPkg). WithLabel(LabelSize, LabelSizeSmall). WithLabel(config.LabelTestSuite, config.TestSuiteDefault). @@ -90,12 +86,10 @@ func TestConfigurationWithDependency(t *testing.T) { } func TestProviderUpgrade(t *testing.T) { - // Test that we can upgrade a provider to a new version, even when a managed - // resource has been created. manifests := "test/e2e/manifests/pkg/provider" environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests that we can upgrade a provider to a new version, even when a managed resource has been created."). WithLabel(LabelArea, LabelAreaPkg). WithLabel(LabelSize, LabelSizeSmall). WithLabel(config.LabelTestSuite, config.TestSuiteDefault). @@ -126,7 +120,7 @@ func TestProviderUpgrade(t *testing.T) { func TestDeploymentRuntimeConfig(t *testing.T) { manifests := "test/e2e/manifests/pkg/deployment-runtime-config" environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests that custom configurations in the deployment runtime do not disrupt the functionality of the resources, ensuring that deployments, services, and service accounts are created and configured correctly according to the specified runtime settings."). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). WithLabel(config.LabelTestSuite, config.TestSuiteDefault). @@ -193,7 +187,7 @@ func TestDeploymentRuntimeConfig(t *testing.T) { func TestExternallyManagedServiceAccount(t *testing.T) { manifests := "test/e2e/manifests/pkg/externally-managed-service-account" environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests that an externally managed service account is not owned by the deployment while verifying that the deployment correctly references the service account as specified in the runtime configuration."). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). 
WithLabel(config.LabelTestSuite, config.TestSuiteDefault). diff --git a/test/e2e/realtimecompositions_test.go b/test/e2e/realtimecompositions_test.go index ad49d5c59..f206c57fd 100644 --- a/test/e2e/realtimecompositions_test.go +++ b/test/e2e/realtimecompositions_test.go @@ -54,7 +54,7 @@ func TestRealtimeCompositions(t *testing.T) { withTestLabels := resources.WithLabelSelector(labels.FormatLabels(map[string]string{"realtime-compositions": "true"})) environment.Test(t, - features.New(t.Name()). + features.NewWithDescription(t.Name(), "Tests scenarios for compositions with realtime reconciles through MR updates."). WithLabel(LabelStage, LabelStageAlpha). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). diff --git a/test/e2e/usage_test.go b/test/e2e/usage_test.go index 9fcb0608e..2f7826708 100644 --- a/test/e2e/usage_test.go +++ b/test/e2e/usage_test.go @@ -59,10 +59,8 @@ func TestUsageStandalone(t *testing.T) { funcs.DeleteResources(manifests, "with-by/using.yaml"), funcs.ResourcesDeletedWithin(30*time.Second, manifests, "with-by/using.yaml"), funcs.ResourcesDeletedWithin(30*time.Second, manifests, "with-by/usage.yaml"), - - // Deletion of used resource should be allowed after usage is cleared. - funcs.DeleteResources(manifests, "with-by/used.yaml"), - funcs.ResourcesDeletedWithin(30*time.Second, manifests, "with-by/used.yaml"), + // We have "replayDeletion: true" on the usage, deletion of used resource should be replayed after usage is cleared. + funcs.ResourcesDeletedWithin(1*time.Minute, manifests, "with-by/used.yaml"), ), }, { @@ -118,8 +116,6 @@ func TestUsageStandalone(t *testing.T) { ) } -// TestUsageComposition tests scenarios for Crossplane's `Usage` resource as part -// of a composition. func TestUsageComposition(t *testing.T) { manifests := "test/e2e/manifests/apiextensions/usage/composition" @@ -129,7 +125,7 @@ func TestUsageComposition(t *testing.T) { })) environment.Test(t, - features.New(t.Name()). 
+ features.NewWithDescription(t.Name(), "Tests scenarios for Crossplane's `Usage` resource as part of a composition."). WithLabel(LabelStage, LabelStageAlpha). WithLabel(LabelArea, LabelAreaAPIExtensions). WithLabel(LabelSize, LabelSizeSmall). diff --git a/test/e2e/utils/cert.go b/test/e2e/utils/cert.go index d327fe80c..ff49f1e30 100644 --- a/test/e2e/utils/cert.go +++ b/test/e2e/utils/cert.go @@ -27,7 +27,7 @@ import ( ) // CreateCert create TLS certificate for given dns name -// and returns CA and key in PEM format, or an error +// and returns CA and key in PEM format, or an error. func CreateCert(dnsName string) (string, string, error) { ca := &x509.Certificate{ SerialNumber: big.NewInt(2019), diff --git a/test/e2e/xrd_validation_test.go b/test/e2e/xrd_validation_test.go index 952dde1a1..55c2e41c5 100644 --- a/test/e2e/xrd_validation_test.go +++ b/test/e2e/xrd_validation_test.go @@ -41,6 +41,12 @@ func TestXRDValidation(t *testing.T) { Description: "An invalid update to an XRD should be rejected.", Assessment: funcs.ResourcesFailToApply(FieldManager, manifests, "xrd-valid-updated-invalid.yaml"), }, + { + // An update to immutable XRD fields should be rejected. + Name: "ImmutableXRDFieldUpdateIsRejected", + Description: "An update to immutable XRD field should be rejected.", + Assessment: funcs.ResourcesFailToApply(FieldManager, manifests, "xrd-immutable-updated.yaml"), + }, { // An invalid XRD should be rejected. Name: "InvalidXRDIsRejected",