From adffb2b34f44ac6aaffd2dc3a2169cabd18f8faf Mon Sep 17 00:00:00 2001 From: Justin Ross Date: Fri, 23 Feb 2024 07:45:59 -0500 Subject: [PATCH] Skewer update --- .github/workflows/main.yaml | 41 +- .gitignore | 2 +- .../test-centos-7.dockerfile => .plano.py | 12 +- .planofile | 1 - README.md | 412 +- external/skewer/.github/workflows/main.yaml | 28 + external/skewer/.gitignore | 4 + external/skewer/.plano.py | 68 + {subrepos => external}/skewer/LICENSE.txt | 0 external/skewer/README.md | 287 + .../skewer/config/.github/workflows/main.yaml | 46 +- .../skewer/config}/.gitignore | 1 + external/skewer/config/.plano.py | 20 + .../skewer/example}/.gitignore | 2 +- external/skewer/example/.plano.py | 20 + external/skewer/example/README.md | 357 + .../test => external/skewer/example/plano | 13 +- external/skewer/example/python/plano | 1 + external/skewer/example/python/skewer | 1 + external/skewer/example/skewer.yaml | 37 + .../plano/.github/workflows/main.yaml | 48 + .../skewer/external}/plano/.gitignore | 3 +- .../skewer/external}/plano/LICENSE.txt | 0 external/skewer/external/plano/MANIFEST.in | 1 + external/skewer/external/plano/Makefile | 70 + external/skewer/external/plano/README.md | 90 + .../skewer/external}/plano/bin/plano | 8 +- .../skewer/external/plano/bin/plano-test | 8 +- .../skewer/external}/plano/docs/conf.py | 0 .../skewer/external}/plano/docs/index.rst | 0 external/skewer/external/plano/pyproject.toml | 23 + .../external/plano/src/plano/__init__.py | 12 +- .../plano/src/plano/_testproject/.plano.py | 112 + .../_testproject/src/chucker/__init__.py | 0 .../_testproject/src/chucker/moretests.py | 12 +- .../plano/_testproject/src/chucker/tests.py | 70 + .../skewer/external/plano/src/plano/_tests.py | 443 +- .../external/plano/src/plano/command.py | 511 + .../skewer/external/plano/src/plano/github.py | 80 + .../skewer/external/plano/src/plano/main.py | 1788 + .../skewer/external/plano/src/plano/test.py | 428 + .../skewer/plano | 12 +- 
external/skewer/python/plano | 1 + external/skewer/python/skewer/__init__.py | 20 + external/skewer/python/skewer/main.py | 731 + .../skewer/python/skewer/planocommands.py | 91 + .../skewer/python/skewer/standardsteps.yaml | 330 + .../skewer/python/skewer/standardtext.yaml | 49 + external/skewer/python/skewer/tests.py | 67 + frontend/kubernetes.yaml | 30 +- kafka-cluster/cluster1.yaml | 56 +- kafka-cluster/strimzi.yaml | 31190 ++++++++-------- plano | 29 +- python/plano | 1 + python/skewer | 1 + skewer.yaml | 66 +- subrepos/skewer/.github/workflows/main.yaml | 22 - subrepos/skewer/.gitrepo | 12 - subrepos/skewer/.planofile | 23 - subrepos/skewer/README.md | 261 - .../skewer/config/.github/workflows/main.yaml | 22 - subrepos/skewer/config/.planofile | 62 - subrepos/skewer/plano | 1 - subrepos/skewer/python/plano.py | 1 - subrepos/skewer/python/skewer.py | 652 - .../plano/.github/workflows/main.yaml | 12 - subrepos/skewer/subrepos/plano/.gitrepo | 12 - subrepos/skewer/subrepos/plano/Makefile | 114 - subrepos/skewer/subrepos/plano/README.md | 13 - subrepos/skewer/subrepos/plano/bin/planosh | 34 - .../skewer/subrepos/plano/python/bullseye.py | 319 - .../subrepos/plano/python/bullseye.strings | 221 - .../subrepos/plano/python/bullseye_tests.py | 145 - .../skewer/subrepos/plano/python/plano.py | 2366 -- .../skewer/subrepos/plano/scripts/devel.sh | 2 - .../plano/scripts/test-bootstrap.dockerfile | 46 - subrepos/skewer/subrepos/plano/setup.py | 80 - .../subrepos/plano/test-project/Planofile | 75 - .../plano/test-project/bin/chucker.in | 1 - .../plano/test-project/files/notes.txt | 0 .../plano/test-project/python/chucker.py | 0 .../test-project/python/chucker_tests.py | 35 - .../plano/test-project/python/flipper.py | 0 .../test-example/.github/workflows/main.yaml | 22 - subrepos/skewer/test-example/.planofile | 1 - subrepos/skewer/test-example/README.md | 471 - .../skewer/test-example/images/entities.svg | 3 - .../skewer/test-example/images/sequence.svg | 1 - 
.../skewer/test-example/images/sequence.txt | 22 - subrepos/skewer/test-example/plano | 1 - subrepos/skewer/test-example/python/skewer.py | 1 - subrepos/skewer/test-example/skewer.yaml | 103 - subrepos/skewer/test-example/subrepos/skewer | 1 - 93 files changed, 22179 insertions(+), 20712 deletions(-) rename subrepos/skewer/subrepos/plano/scripts/test-centos-7.dockerfile => .plano.py (75%) delete mode 120000 .planofile create mode 100644 external/skewer/.github/workflows/main.yaml create mode 100644 external/skewer/.gitignore create mode 100644 external/skewer/.plano.py rename {subrepos => external}/skewer/LICENSE.txt (100%) create mode 100644 external/skewer/README.md rename subrepos/skewer/subrepos/plano/bin/plano-self-test.in => external/skewer/config/.github/workflows/main.yaml (51%) mode change 100755 => 100644 rename {subrepos/skewer/test-example => external/skewer/config}/.gitignore (50%) create mode 100644 external/skewer/config/.plano.py rename {subrepos/skewer => external/skewer/example}/.gitignore (50%) create mode 100644 external/skewer/example/.plano.py create mode 100644 external/skewer/example/README.md rename subrepos/skewer/subrepos/plano/scripts/test => external/skewer/example/plano (80%) create mode 120000 external/skewer/example/python/plano create mode 120000 external/skewer/example/python/skewer create mode 100644 external/skewer/example/skewer.yaml create mode 100644 external/skewer/external/plano/.github/workflows/main.yaml rename {subrepos/skewer/subrepos => external/skewer/external}/plano/.gitignore (64%) rename {subrepos/skewer/subrepos => external/skewer/external}/plano/LICENSE.txt (100%) create mode 100644 external/skewer/external/plano/MANIFEST.in create mode 100644 external/skewer/external/plano/Makefile create mode 100644 external/skewer/external/plano/README.md rename {subrepos/skewer/subrepos => external/skewer/external}/plano/bin/plano (79%) rename subrepos/skewer/subrepos/plano/bin/planotest => 
external/skewer/external/plano/bin/plano-test (79%) rename {subrepos/skewer/subrepos => external/skewer/external}/plano/docs/conf.py (100%) rename {subrepos/skewer/subrepos => external/skewer/external}/plano/docs/index.rst (100%) create mode 100644 external/skewer/external/plano/pyproject.toml rename subrepos/skewer/subrepos/plano/scripts/test-ubuntu.dockerfile => external/skewer/external/plano/src/plano/__init__.py (77%) create mode 100644 external/skewer/external/plano/src/plano/_testproject/.plano.py rename subrepos/skewer/subrepos/plano/test-project/bin/chucker-test => external/skewer/external/plano/src/plano/_testproject/src/chucker/__init__.py (100%) rename subrepos/skewer/subrepos/plano/scripts/test-centos-8.dockerfile => external/skewer/external/plano/src/plano/_testproject/src/chucker/moretests.py (77%) create mode 100644 external/skewer/external/plano/src/plano/_testproject/src/chucker/tests.py rename subrepos/skewer/subrepos/plano/python/plano_tests.py => external/skewer/external/plano/src/plano/_tests.py (70%) create mode 100644 external/skewer/external/plano/src/plano/command.py create mode 100644 external/skewer/external/plano/src/plano/github.py create mode 100644 external/skewer/external/plano/src/plano/main.py create mode 100644 external/skewer/external/plano/src/plano/test.py rename subrepos/skewer/subrepos/plano/scripts/test-fedora.dockerfile => external/skewer/plano (79%) mode change 100644 => 100755 create mode 120000 external/skewer/python/plano create mode 100644 external/skewer/python/skewer/__init__.py create mode 100644 external/skewer/python/skewer/main.py create mode 100644 external/skewer/python/skewer/planocommands.py create mode 100644 external/skewer/python/skewer/standardsteps.yaml create mode 100644 external/skewer/python/skewer/standardtext.yaml create mode 100644 external/skewer/python/skewer/tests.py mode change 120000 => 100755 plano create mode 120000 python/plano create mode 120000 python/skewer delete mode 100644 
subrepos/skewer/.github/workflows/main.yaml delete mode 100644 subrepos/skewer/.gitrepo delete mode 100644 subrepos/skewer/.planofile delete mode 100644 subrepos/skewer/README.md delete mode 100644 subrepos/skewer/config/.github/workflows/main.yaml delete mode 100644 subrepos/skewer/config/.planofile delete mode 120000 subrepos/skewer/plano delete mode 120000 subrepos/skewer/python/plano.py delete mode 100644 subrepos/skewer/python/skewer.py delete mode 100644 subrepos/skewer/subrepos/plano/.github/workflows/main.yaml delete mode 100644 subrepos/skewer/subrepos/plano/.gitrepo delete mode 100644 subrepos/skewer/subrepos/plano/Makefile delete mode 100644 subrepos/skewer/subrepos/plano/README.md delete mode 100755 subrepos/skewer/subrepos/plano/bin/planosh delete mode 100644 subrepos/skewer/subrepos/plano/python/bullseye.py delete mode 100644 subrepos/skewer/subrepos/plano/python/bullseye.strings delete mode 100644 subrepos/skewer/subrepos/plano/python/bullseye_tests.py delete mode 100644 subrepos/skewer/subrepos/plano/python/plano.py delete mode 100644 subrepos/skewer/subrepos/plano/scripts/devel.sh delete mode 100644 subrepos/skewer/subrepos/plano/scripts/test-bootstrap.dockerfile delete mode 100755 subrepos/skewer/subrepos/plano/setup.py delete mode 100644 subrepos/skewer/subrepos/plano/test-project/Planofile delete mode 100644 subrepos/skewer/subrepos/plano/test-project/bin/chucker.in delete mode 100644 subrepos/skewer/subrepos/plano/test-project/files/notes.txt delete mode 100644 subrepos/skewer/subrepos/plano/test-project/python/chucker.py delete mode 100644 subrepos/skewer/subrepos/plano/test-project/python/chucker_tests.py delete mode 100644 subrepos/skewer/subrepos/plano/test-project/python/flipper.py delete mode 100644 subrepos/skewer/test-example/.github/workflows/main.yaml delete mode 120000 subrepos/skewer/test-example/.planofile delete mode 100644 subrepos/skewer/test-example/README.md delete mode 100644 subrepos/skewer/test-example/images/entities.svg 
delete mode 100644 subrepos/skewer/test-example/images/sequence.svg delete mode 100644 subrepos/skewer/test-example/images/sequence.txt delete mode 120000 subrepos/skewer/test-example/plano delete mode 120000 subrepos/skewer/test-example/python/skewer.py delete mode 100644 subrepos/skewer/test-example/skewer.yaml delete mode 120000 subrepos/skewer/test-example/subrepos/skewer diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index e32360c..db00f3c 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -1,3 +1,22 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + name: main on: push: @@ -6,17 +25,23 @@ on: - cron: "0 0 * * 0" jobs: test: + strategy: + fail-fast: false + matrix: + skupper-version: [latest, main] runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - uses: actions/setup-python@v4 + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 with: python-version: "3.x" - - uses: manusa/actions-setup-minikube@v2.6.0 + - uses: manusa/actions-setup-minikube@v2.10.0 with: - minikube version: "v1.25.2" - kubernetes version: "v1.24.1" - github token: ${{ secrets.GITHUB_TOKEN }} - - run: curl -f https://skupper.io/install.sh | sh - - run: echo "$HOME/.local/bin" >> $GITHUB_PATH + minikube version: "v1.32.0" + kubernetes version: "v1.29.0" + github token: ${{secrets.GITHUB_TOKEN}} + - run: curl https://skupper.io/install.sh | bash -s -- --version ${{matrix.skupper-version}} + - run: echo "$HOME/.local/bin" >> "$GITHUB_PATH" - run: ./plano test + env: + PLANO_COLOR: 1 diff --git a/.gitignore b/.gitignore index 049482c..500983c 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,2 @@ -__pycache__/ /README.html +__pycache__/ diff --git a/subrepos/skewer/subrepos/plano/scripts/test-centos-7.dockerfile b/.plano.py similarity index 75% rename from subrepos/skewer/subrepos/plano/scripts/test-centos-7.dockerfile rename to .plano.py index 89d107e..4609d49 100644 --- a/subrepos/skewer/subrepos/plano/scripts/test-centos-7.dockerfile +++ b/.plano.py @@ -17,14 +17,4 @@ # under the License. # -FROM centos:7 - -RUN yum -q -y update && yum -q clean all - -RUN yum -y install epel-release - -RUN yum -y install make python2-pyyaml python36 python36-PyYAML - -COPY . 
/root/plano -WORKDIR /root/plano -CMD ["make", "clean", "test", "install", "PREFIX=/usr/local"] +from skewer.planocommands import * diff --git a/.planofile b/.planofile deleted file mode 120000 index 46de17c..0000000 --- a/.planofile +++ /dev/null @@ -1 +0,0 @@ -subrepos/skewer/config/.planofile \ No newline at end of file diff --git a/README.md b/README.md index bf9fbd4..a89812d 100644 --- a/README.md +++ b/README.md @@ -12,23 +12,22 @@ across cloud providers, data centers, and edge sites. [website]: https://skupper.io/ [examples]: https://skupper.io/examples/index.html - #### Contents * [Overview](#overview) * [Prerequisites](#prerequisites) -* [Step 1: Configure separate console sessions](#step-1-configure-separate-console-sessions) -* [Step 2: Access your clusters](#step-2-access-your-clusters) -* [Step 3: Set up your namespaces](#step-3-set-up-your-namespaces) -* [Step 4: Install Skupper in your namespaces](#step-4-install-skupper-in-your-namespaces) -* [Step 5: Check the status of your namespaces](#step-5-check-the-status-of-your-namespaces) -* [Step 6: Link your namespaces](#step-6-link-your-namespaces) -* [Step 7: Deploy the Kafka cluster](#step-7-deploy-the-kafka-cluster) -* [Step 8: Expose the Kafka cluster](#step-8-expose-the-kafka-cluster) -* [Step 9: Deploy the application services](#step-9-deploy-the-application-services) -* [Step 10: Test the application](#step-10-test-the-application) -* [Accessing the web console](#accessing-the-web-console) +* [Step 1: Install the Skupper command-line tool](#step-1-install-the-skupper-command-line-tool) +* [Step 2: Set up your namespaces](#step-2-set-up-your-namespaces) +* [Step 3: Deploy the Kafka cluster](#step-3-deploy-the-kafka-cluster) +* [Step 4: Deploy the application services](#step-4-deploy-the-application-services) +* [Step 5: Create your sites](#step-5-create-your-sites) +* [Step 6: Link your sites](#step-6-link-your-sites) +* [Step 7: Expose the Kafka cluster](#step-7-expose-the-kafka-cluster) +* [Step 
8: Access the frontend](#step-8-access-the-frontend) * [Cleaning up](#cleaning-up) +* [Summary](#summary) +* [Next steps](#next-steps) +* [About this example](#about-this-example) ## Overview @@ -64,26 +63,43 @@ to represent the private data center and public cloud. ## Prerequisites - * The `kubectl` command-line tool, version 1.15 or later ([installation guide][install-kubectl]) -* The `skupper` command-line tool, the latest version ([installation - guide][install-skupper]) - -* Access to at least one Kubernetes cluster, from any provider you - choose +* Access to at least one Kubernetes cluster, from [any provider you + choose][kube-providers] [install-kubectl]: https://kubernetes.io/docs/tasks/tools/install-kubectl/ -[install-skupper]: https://skupper.io/install/index.html +[kube-providers]: https://skupper.io/start/kubernetes.html + +## Step 1: Install the Skupper command-line tool + +This example uses the Skupper command-line tool to deploy Skupper. +You need to install the `skupper` command only once for each +development environment. + +On Linux or Mac, you can use the install script (inspect it +[here][install-script]) to download and extract the command: + +~~~ shell +curl https://skupper.io/install.sh | sh +~~~ + +The script installs the command under your home directory. It +prompts you to add the command to your path if necessary. + +For Windows and other installation options, see [Installing +Skupper][install-docs]. +[install-script]: https://github.com/skupperproject/skupper-website/blob/main/input/install.sh +[install-docs]: https://skupper.io/install/ -## Step 1: Configure separate console sessions +## Step 2: Set up your namespaces -Skupper is designed for use with multiple namespaces, typically on -different clusters. The `skupper` command uses your -[kubeconfig][kubeconfig] and current context to select the -namespace where it operates. +Skupper is designed for use with multiple Kubernetes namespaces, +usually on different clusters. 
The `skupper` and `kubectl` +commands use your [kubeconfig][kubeconfig] and current context to +select the namespace where they operate. [kubeconfig]: https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/ @@ -95,92 +111,119 @@ A single kubeconfig supports only one active context per user. Since you will be using multiple contexts at once in this exercise, you need to create distinct kubeconfigs. -Start a console session for each of your namespaces. Set the -`KUBECONFIG` environment variable to a different path in each -session. +For each namespace, open a new terminal window. In each terminal, +set the `KUBECONFIG` environment variable to a different path and +log in to your cluster. Then create the namespace you wish to use +and set the namespace on your current context. -_**Console for public:**_ +**Note:** The login procedure varies by provider. See the +documentation for yours: + +* [Minikube](https://skupper.io/start/minikube.html#cluster-access) +* [Amazon Elastic Kubernetes Service (EKS)](https://skupper.io/start/eks.html#cluster-access) +* [Azure Kubernetes Service (AKS)](https://skupper.io/start/aks.html#cluster-access) +* [Google Kubernetes Engine (GKE)](https://skupper.io/start/gke.html#cluster-access) +* [IBM Kubernetes Service](https://skupper.io/start/ibmks.html#cluster-access) +* [OpenShift](https://skupper.io/start/openshift.html#cluster-access) + +_**Public:**_ ~~~ shell export KUBECONFIG=~/.kube/config-public +# Enter your provider-specific login command +kubectl create namespace public +kubectl config set-context --current --namespace public ~~~ -_**Console for private:**_ +_**Private:**_ ~~~ shell export KUBECONFIG=~/.kube/config-private +# Enter your provider-specific login command +kubectl create namespace private +kubectl config set-context --current --namespace private ~~~ -## Step 2: Access your clusters - -The methods for accessing your clusters vary by Kubernetes -provider. 
Find the instructions for your chosen providers and use -them to authenticate and configure access for each console -session. See the following links for more information: - -* [Minikube](https://skupper.io/start/minikube.html) -* [Amazon Elastic Kubernetes Service (EKS)](https://skupper.io/start/eks.html) -* [Azure Kubernetes Service (AKS)](https://skupper.io/start/aks.html) -* [Google Kubernetes Engine (GKE)](https://skupper.io/start/gke.html) -* [IBM Kubernetes Service](https://skupper.io/start/ibmks.html) -* [OpenShift](https://skupper.io/start/openshift.html) -* [More providers](https://kubernetes.io/partners/#kcsp) +## Step 3: Deploy the Kafka cluster -## Step 3: Set up your namespaces +In Private, use the `kubectl create` and `kubectl apply` +commands with the listed YAML files to install the operator and +deploy the cluster and topic. -Use `kubectl create namespace` to create the namespaces you wish -to use (or use existing namespaces). Use `kubectl config -set-context` to set the current namespace for each session. - -_**Console for public:**_ +_**Private:**_ ~~~ shell -kubectl create namespace public -kubectl config set-context --current --namespace public +kubectl create -f kafka-cluster/strimzi.yaml +kubectl apply -f kafka-cluster/cluster1.yaml +kubectl wait --for condition=ready --timeout 900s kafka/cluster1 ~~~ -_Sample output:_ +**Note:** -~~~ console -$ kubectl create namespace public -namespace/public created +By default, the Kafka bootstrap server returns broker addresses +that include the Kubernetes namespace in their domain name. +When, as in this example, the Kafka client is running in a +namespace with a different name from that of the Kafka cluster, +this prevents the client from resolving the Kafka brokers. + +To make the Kafka brokers reachable, set the `advertisedHost` +property of each broker to a domain name that the Kafka client +can resolve at the remote site. 
In this example, this is +achieved with the following listener configuration: -$ kubectl config set-context --current --namespace public -Context "minikube" modified. +~~~ yaml +spec: + kafka: + listeners: + - name: plain + port: 9092 + type: internal + tls: false + configuration: + brokers: + - broker: 0 + advertisedHost: cluster1-kafka-0.cluster1-kafka-brokers ~~~ -_**Console for private:**_ +See [Advertised addresses for brokers][advertised-addresses] for +more information. + +[advertised-addresses]: https://strimzi.io/docs/operators/in-development/configuring.html#property-listener-config-broker-reference -~~~ shell -kubectl create namespace private -kubectl config set-context --current --namespace private -~~~ +## Step 4: Deploy the application services -_Sample output:_ +In Public, use the `kubectl apply` command with the listed YAML +files to install the application services. -~~~ console -$ kubectl create namespace private -namespace/private created +_**Public:**_ -$ kubectl config set-context --current --namespace private -Context "minikube" modified. +~~~ shell +kubectl apply -f order-processor/kubernetes.yaml +kubectl apply -f market-data/kubernetes.yaml +kubectl apply -f frontend/kubernetes.yaml ~~~ -## Step 4: Install Skupper in your namespaces +## Step 5: Create your sites -The `skupper init` command installs the Skupper router and service -controller in the current namespace. Run the `skupper init` command -in each namespace. +A Skupper _site_ is a location where components of your +application are running. Sites are linked together to form a +network for your application. In Kubernetes, a site is associated +with a namespace. -**Note:** If you are using Minikube, [you need to start `minikube -tunnel`][minikube-tunnel] before you install Skupper. +For each namespace, use `skupper init` to create a site. This +deploys the Skupper router and controller. Then use `skupper +status` to see the outcome. 
+ +**Note:** If you are using Minikube, you need to [start minikube +tunnel][minikube-tunnel] before you run `skupper init`. [minikube-tunnel]: https://skupper.io/start/minikube.html#running-minikube-tunnel -_**Console for public:**_ +_**Public:**_ ~~~ shell skupper init +skupper status ~~~ _Sample output:_ @@ -188,13 +231,18 @@ _Sample output:_ ~~~ console $ skupper init Waiting for LoadBalancer IP or hostname... +Waiting for status... Skupper is now installed in namespace 'public'. Use 'skupper status' to get more information. + +$ skupper status +Skupper is enabled for namespace "public". It is not connected to any other sites. It has no exposed services. ~~~ -_**Console for private:**_ +_**Private:**_ ~~~ shell skupper init +skupper status ~~~ _Sample output:_ @@ -202,67 +250,40 @@ _Sample output:_ ~~~ console $ skupper init Waiting for LoadBalancer IP or hostname... +Waiting for status... Skupper is now installed in namespace 'private'. Use 'skupper status' to get more information. -~~~ - -## Step 5: Check the status of your namespaces - -Use `skupper status` in each console to check that Skupper is -installed. - -_**Console for public:**_ - -~~~ shell -skupper status -~~~ - -_Sample output:_ - -~~~ console -$ skupper status -Skupper is enabled for namespace "public" in interior mode. It is connected to 1 other site. It has 1 exposed service. -The site console url is: -The credentials for internal console-auth mode are held in secret: 'skupper-console-users' -~~~ - -_**Console for private:**_ -~~~ shell -skupper status -~~~ - -_Sample output:_ - -~~~ console $ skupper status -Skupper is enabled for namespace "private" in interior mode. It is connected to 1 other site. It has 1 exposed service. -The site console url is: -The credentials for internal console-auth mode are held in secret: 'skupper-console-users' +Skupper is enabled for namespace "private". It is not connected to any other sites. It has no exposed services. 
~~~ As you move through the steps below, you can use `skupper status` at any time to check your progress. -## Step 6: Link your namespaces +## Step 6: Link your sites + +A Skupper _link_ is a channel for communication between two sites. +Links serve as a transport for application connections and +requests. Creating a link requires use of two `skupper` commands in conjunction, `skupper token create` and `skupper link create`. The `skupper token create` command generates a secret token that signifies permission to create a link. The token also carries the -link details. Then, in a remote namespace, The `skupper link -create` command uses the token to create a link to the namespace +link details. Then, in a remote site, The `skupper link +create` command uses the token to create a link to the site that generated it. **Note:** The link token is truly a *secret*. Anyone who has the -token can link to your namespace. Make sure that only those you -trust have access to it. +token can link to your site. Make sure that only those you trust +have access to it. -First, use `skupper token create` in one namespace to generate the -token. Then, use `skupper link create` in the other to create a -link. +First, use `skupper token create` in site Public to generate the +token. Then, use `skupper link create` in site Private to link +the sites. -_**Console for public:**_ +_**Public:**_ ~~~ shell skupper token create ~/secret.token @@ -275,7 +296,7 @@ $ skupper token create ~/secret.token Token written to ~/secret.token ~~~ -_**Console for private:**_ +_**Private:**_ ~~~ shell skupper link create ~/secret.token @@ -289,105 +310,54 @@ Site configured to link to https://10.105.193.154:8081/ed9c37f6-d78a-11ec-a8c7-0 Check the status of the link using 'skupper link status'. ~~~ -If your console sessions are on different machines, you may need -to use `sftp` or a similar tool to transfer the token securely. 
-By default, tokens expire after a single use or 15 minutes after +If your terminal sessions are on different machines, you may need +to use `scp` or a similar tool to transfer the token securely. By +default, tokens expire after a single use or 15 minutes after creation. -## Step 7: Deploy the Kafka cluster +## Step 7: Expose the Kafka cluster -In the private namespace, use the `kubectl create` and `kubectl -apply` commands with the listed YAML files to install the -operator and deploy the cluster and topic. +In Private, use `skupper expose` with the `--headless` option to +expose the Kafka cluster as a headless service on the Skupper +network. -_**Console for private:**_ +Then, in Public, use `kubectl get service` to check that the +`cluster1-kafka-brokers` service appears after a moment. -~~~ shell -kubectl create -f kafka-cluster/strimzi.yaml -kubectl apply -f kafka-cluster/cluster1.yaml -kubectl wait --for condition=ready --timeout 900s kafka/cluster1 -~~~ - -**Note:** - -By default, the Kafka bootstrap server returns broker addresses -that include the Kubernetes namespace in their domain name. -When, as in this example, the Kafka client is running in a -namespace with a different name from that of the Kafka cluster, -this prevents the client from resolving the Kafka brokers. - -To make the Kafka brokers reachable, set the `advertisedHost` -property of each broker to a domain name that the Kafka client -can resolve at the remote site. In this example, this is -achieved with the following listener configuration: - -~~~ yaml -spec: - kafka: - listeners: - - name: plain - port: 9092 - type: internal - tls: false - configuration: - brokers: - - broker: 0 - advertisedHost: cluster1-kafka-0.cluster1-kafka-brokers -~~~ - -See [Advertised addresses for brokers][advertised-addresses] for -more information. 
- -[advertised-addresses]: https://strimzi.io/docs/operators/in-development/configuring.html#property-listener-config-broker-reference - -## Step 8: Expose the Kafka cluster - -In the private namespace, use `skupper expose` with the -`--headless` option to expose the Kafka cluster as a headless -service on the Skupper network. - -Then, in the public namespace, use `kubectl get service` to -check that the `cluster1-kafka-brokers` service appears after a -moment. - -_**Console for private:**_ +_**Private:**_ ~~~ shell skupper expose statefulset/cluster1-kafka --headless --port 9092 ~~~ -_**Console for public:**_ +_**Public:**_ ~~~ shell kubectl get service/cluster1-kafka-brokers ~~~ -## Step 9: Deploy the application services - -In the public namespace, use the `kubectl apply` command with -the listed YAML files to install the application services. +## Step 8: Access the frontend -_**Console for public:**_ +In order to use and test the application, we need external access +to the frontend. -~~~ shell -kubectl apply -f order-processor/kubernetes.yaml -kubectl apply -f market-data/kubernetes.yaml -kubectl apply -f frontend/kubernetes.yaml -~~~ +Use `kubectl expose` with `--type LoadBalancer` to open network +access to the frontend service. -## Step 10: Test the application +Once the frontend is exposed, use `kubectl get service/frontend` +to look up the external IP of the frontend service. If the +external IP is ``, try again after a moment. -Now we're ready to try it out. Use `kubectl get service/frontend` -to look up the external IP of the frontend service. Then use -`curl` or a similar tool to request the `/api/health` endpoint at -that address. +Once you have the external IP, use `curl` or a similar tool to +request the `/api/health` endpoint at that address. **Note:** The `` field in the following commands is a placeholder. The actual value is an IP address. 
-_**Console for public:**_ +_**Public:**_ ~~~ shell +kubectl expose deployment/frontend --port 8080 --type LoadBalancer kubectl get service/frontend curl http://:8080/api/health ~~~ @@ -395,6 +365,9 @@ curl http://:8080/api/health _Sample output:_ ~~~ console +$ kubectl expose deployment/frontend --port 8080 --type LoadBalancer +service/frontend exposed + $ kubectl get service/frontend NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE frontend LoadBalancer 10.103.232.28 8080:30407/TCP 15s @@ -406,46 +379,12 @@ OK If everything is in order, you can now access the web interface by navigating to `http://:8080/` in your browser. -## Accessing the web console - -Skupper includes a web console you can use to view the application -network. To access it, use `skupper status` to look up the URL of -the web console. Then use `kubectl get -secret/skupper-console-users` to look up the console admin -password. - -**Note:** The `` and `` fields in the -following output are placeholders. The actual values are specific -to your environment. - -_**Console for public:**_ - -~~~ shell -skupper status -kubectl get secret/skupper-console-users -o jsonpath={.data.admin} | base64 -d -~~~ - -_Sample output:_ - -~~~ console -$ skupper status -Skupper is enabled for namespace "public" in interior mode. It is connected to 1 other site. It has 1 exposed service. -The site console url is: -The credentials for internal console-auth mode are held in secret: 'skupper-console-users' - -$ kubectl get secret/skupper-console-users -o jsonpath={.data.admin} | base64 -d - -~~~ - -Navigate to `` in your browser. When prompted, log -in as user `admin` and enter the password. - ## Cleaning up To remove Skupper and the other resources from this exercise, use the following commands. 
-_**Console for private:**_ +_**Private:**_ ~~~ shell skupper delete @@ -453,7 +392,7 @@ kubectl delete -f kafka-cluster/cluster1.yaml kubectl delete -f kafka-cluster/strimzi.yaml ~~~ -_**Console for public:**_ +_**Public:**_ ~~~ shell skupper delete @@ -464,5 +403,18 @@ kubectl delete -f order-processor/kubernetes.yaml ## Next steps - Check out the other [examples][examples] on the Skupper website. + +## About this example + +This example was produced using [Skewer][skewer], a library for +documenting and testing Skupper examples. + +[skewer]: https://github.com/skupperproject/skewer + +Skewer provides utility functions for generating the README and +running the example steps. Use the `./plano` command in the project +root to see what is available. + +To quickly stand up the example using Minikube, try the `./plano demo` +command. diff --git a/external/skewer/.github/workflows/main.yaml b/external/skewer/.github/workflows/main.yaml new file mode 100644 index 0000000..09e2b58 --- /dev/null +++ b/external/skewer/.github/workflows/main.yaml @@ -0,0 +1,28 @@ +name: main +on: + push: + pull_request: + schedule: + - cron: "0 0 * * 0" +jobs: + test: + strategy: + fail-fast: false + matrix: + skupper-version: [latest, main] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: "3.x" + - uses: manusa/actions-setup-minikube@v2.10.0 + with: + minikube version: "v1.32.0" + kubernetes version: "v1.29.0" + github token: ${{secrets.GITHUB_TOKEN}} + - run: curl https://skupper.io/install.sh | bash -s -- --version ${{matrix.skupper-version}} + - run: echo "$HOME/.local/bin" >> $GITHUB_PATH + - run: ./plano test + env: + PLANO_COLOR: 1 diff --git a/external/skewer/.gitignore b/external/skewer/.gitignore new file mode 100644 index 0000000..f651c26 --- /dev/null +++ b/external/skewer/.gitignore @@ -0,0 +1,4 @@ +__pycache__/ +/README.html +/htmlcov +/.coverage diff --git a/external/skewer/.plano.py 
b/external/skewer/.plano.py new file mode 100644 index 0000000..fe427c3 --- /dev/null +++ b/external/skewer/.plano.py @@ -0,0 +1,68 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +import skewer.tests + +from plano import * +from plano.github import * +from skewer import * + +@command(passthrough=True) +def test(passthrough_args=[]): + PlanoTestCommand(skewer.tests).main(args=passthrough_args) + +@command +def coverage(verbose=False, quiet=False): + check_program("coverage") + + with working_env(PYTHONPATH="python"): + run("coverage run --source skewer -m skewer.tests") + + run("coverage report") + run("coverage html") + + if not quiet: + print(f"file:{get_current_dir()}/htmlcov/index.html") + +@command +def render(verbose=False, quiet=False): + """ + Render README.html from README.md + """ + markdown = read("README.md") + html = convert_github_markdown(markdown) + + write("README.html", html) + + if not quiet: + print(f"file:{get_real_path('README.html')}") + +@command +def clean(): + remove(find(".", "__pycache__")) + remove("README.html") + remove("htmlcov") + remove(".coverage") + +@command +def update_plano(): + """ + Update the embedded Plano repo + """ + update_external_from_github("external/plano", "ssorj", 
"plano") diff --git a/subrepos/skewer/LICENSE.txt b/external/skewer/LICENSE.txt similarity index 100% rename from subrepos/skewer/LICENSE.txt rename to external/skewer/LICENSE.txt diff --git a/external/skewer/README.md b/external/skewer/README.md new file mode 100644 index 0000000..d1a94e4 --- /dev/null +++ b/external/skewer/README.md @@ -0,0 +1,287 @@ +# Skewer + +[![main](https://github.com/skupperproject/skewer/actions/workflows/main.yaml/badge.svg)](https://github.com/skupperproject/skewer/actions/workflows/main.yaml) + +A library for documenting and testing Skupper examples + +A `skewer.yaml` file describes the steps and commands to achieve an +objective using Skupper. Skewer takes the `skewer.yaml` file as input +and produces two outputs: a `README.md` file and a test routine. + +## An example example + +[Example `skewer.yaml` file](example/skewer.yaml) + +[Example `README.md` output](example/README.md) + +## Setting up Skewer for your own example + +**Note:** This is how you set things up from scratch. You can also +use the [Skupper example template][template] as a starting point. 
+ +[template]: https://github.com/skupperproject/skupper-example-template + +Change directory to the root of your example project: + + cd / + +Add the Skewer code as a subdirectory: + + mkdir -p external + curl -sfL https://github.com/skupperproject/skewer/archive/main.tar.gz | tar -C external -xz + mv external/skewer-main external/skewer + +Symlink the Skewer and Plano libraries into your `python` directory: + + mkdir -p python + ln -s ../external/skewer/python/skewer python/skewer + ln -s ../external/skewer/python/plano python/plano + +Copy the `plano` command into the root of your project: + + cp external/skewer/plano plano + +Copy the standard config files: + + cp external/skewer/config/.plano.py .plano.py + cp external/skewer/config/.gitignore .gitignore + +Copy the standard workflow file: + + mkdir -p .github/workflows + cp external/skewer/config/.github/workflows/main.yaml .github/workflows/main.yaml + +Use your editor to create a `skewer.yaml` file in the root of your +project: + + emacs skewer.yaml + +To use the `./plano` command, you must have the Python `pyyaml` +package installed. Use `pip` (or `pip3` on some systems) to install +it: + + pip install pyyaml + +Run the `./plano` command to see the available commands: + +~~~ console +$ ./plano +usage: plano [-h] [-f FILE] [-m MODULE] {command} ... 
+ +Run commands defined as Python functions + +options: + -h, --help Show this help message and exit + -f FILE, --file FILE Load commands from FILE (default '.plano.py') + -m MODULE, --module MODULE + Load commands from MODULE + +commands: + {command} + generate Generate README.md from the data in skewer.yaml + render Render README.html from README.md + clean Clean up the source tree + run Run the example steps + demo Run the example steps and pause for a demo before cleaning up + test Test README generation and run the steps on Minikube + update-skewer Update the embedded Skewer repo and GitHub workflow +~~~ + +## Skewer YAML + +The top level: + +~~~ yaml +title: # Your example's title (required) +subtitle: # Your chosen subtitle (optional) +workflow: # The filename of your GitHub workflow (optional, default 'main.yaml') +overview: # Text introducing your example (optional) +prerequisites: # Text describing prerequisites (optional, has default text) +sites: # A map of named sites (see below) +steps: # A list of steps (see below) +summary: # Text to summarize what the user did (optional) +next_steps: # Text linking to more examples (optional, has default text) +~~~ + +To disable the GitHub workflow, set it to `null`. + +A **site**: + +~~~ yaml +: + title: # The site title (optional) + platform: # "kubernetes" or "podman" (required) + namespace: # The Kubernetes namespace (required for Kubernetes sites) + env: # A map of named environment variables +~~~ + +Kubernetes sites must have a `KUBECONFIG` environment variable with a +path to a kubeconfig file. A tilde (~) in the kubeconfig file path is +replaced with a temporary working directory during testing. + +Podman sites must have a `SKUPPER_PLATFORM` variable with the value +`podman`. 
+ +Example sites: + +~~~ yaml +sites: + east: + title: East + platform: kubernetes + namespace: east + env: + KUBECONFIG: ~/.kube/config-east + west: + title: West + platform: podman + env: + SKUPPER_PLATFORM: podman +~~~ + +A **step**: + +~~~ yaml +- title: # The step title (required) + preamble: # Text before the commands (optional) + commands: # Named groups of commands. See below. + postamble: # Text after the commands (optional) +~~~ + +An example step: + +~~~ yaml +steps: + - title: Expose the frontend service + preamble: | + We have established connectivity between the two namespaces and + made the backend in `east` available to the frontend in `west`. + Before we can test the application, we need external access to + the frontend. + + Use `kubectl expose` with `--type LoadBalancer` to open network + access to the frontend service. Use `kubectl get services` to + check for the service and its external IP address. + commands: + east: + west: +~~~ + +Or you can use a named step from the library of standard steps: + +~~~ yaml +- standard: configure_separate_console_sessions +~~~ + +The standard steps are defined in +[python/skewer/standardsteps.yaml](python/skewer/standardsteps.yaml). +Note that you should not edit this file. Instead, in your +`skewer.yaml` file, you can create custom steps based on the standard +steps. 
You can override the `title`, `preamble`, `commands`, or +`postamble` field of a standard step by adding the field in addition +to `standard`: + +~~~ yaml +- standard: cleaning_up + commands: + east: + - run: skupper delete + - run: kubectl delete deployment/database + west: + - run: skupper delete +~~~ + +A typical mix of standard and custom steps might look like this: + +~~~ yaml +steps: + - standard: install_the_skupper_command_line_tool + - standard: kubernetes/set_up_your_namespaces + + - standard: kubernetes/create_your_sites + - standard: kubernetes/link_your_sites + + + - standard: cleaning_up +~~~ + +**Note:** The `link_your_sites` and `cleaning_up` steps are less +generic than the other steps. For example, `cleaning_up` doesn't +delete any application workoads. Check that the text and commands +these steps produce are doing what you need for your example. If not, +you need to provide a custom step. + +There are some standard steps for examples based on the Skupper +Hello World application: + +~~~ yaml +- standard: hello_world/deploy_the_frontend_and_backend +- standard: hello_world/expose_the_backend +- standard: hello_world/access_the_frontend +- standard: hello_world/cleaning_up +~~~ + +And finally there are some special cases: +~~~ yaml +- standard: kubernetes/set_up_your_kubernetes_namespace +- standard: podman/set_up_your_podman_network +~~~ + +The step commands are separated into named groups corresponding to the +sites. Each named group contains a list of command entries. Each +command entry has a `run` field containing a shell command and other +fields for awaiting completion or providing sample output. + +A **command**: + +~~~ yaml +- run: # A shell command (required) + apply: # Use this command only for "readme" or "test" (optional, default is both) + output: # Sample output to include in the README (optional) +~~~ + +Only the `run` and `output` fields are used in the README content. 
+The `output` field is used as sample output only, not for any kind of +testing. + +The `apply` field is useful when you want the readme instructions to +be different from the test procedure, or you simply want to omit +something. + +There are also some special "await" commands that you can use to pause +for a condition you require before going to the next step. They are +used only for testing and do not impact the README. + +~~~ yaml +- await_resource: # A resource for which to await readiness (optional) + # Example: await_resource: deployment/frontend +- await_ingress: # A service for which to await an external hostname or IP (optional) + # Example: await_ingress: service/frontend +- await_http_ok: # A service and URL template for which to await an HTTP OK response (optional) + # Example: await_http_ok: [service/frontend, "http://{}:8080/api/hello"] +~~~ + +Example commands: + +~~~ yaml +commands: + east: + - run: skupper expose deployment/backend --port 8080 + output: | + deployment backend exposed as backend + west: + - await_resource: service/backend + - run: kubectl get service/backend + output: | + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + backend ClusterIP 10.102.112.121 8080/TCP 30s +~~~ + +## Demo mode + +Skewer has a mode where it executes all the steps, but before cleaning +up and exiting, it pauses so you can inspect things. + +It is enabled by setting the environment variable `SKEWER_DEMO` to any +value when you call `./plano run` or one of its variants. You can +also use `./plano demo`, which sets the variable for you. 
diff --git a/subrepos/skewer/subrepos/plano/bin/plano-self-test.in b/external/skewer/config/.github/workflows/main.yaml old mode 100755 new mode 100644 similarity index 51% rename from subrepos/skewer/subrepos/plano/bin/plano-self-test.in rename to external/skewer/config/.github/workflows/main.yaml index da54368..db00f3c --- a/subrepos/skewer/subrepos/plano/bin/plano-self-test.in +++ b/external/skewer/config/.github/workflows/main.yaml @@ -1,4 +1,3 @@ -#!/usr/bin/python3 # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file @@ -18,20 +17,31 @@ # under the License. # -import os -import sys - -from plano import PlanoTestCommand, PYTHON3 - -home = os.environ.get("PLANO_HOME", "@default_home@") -sys.path.insert(0, os.path.join(home, "python")) - -if __name__ == "__main__": - import plano_tests - test_modules = [plano_tests] - - if PYTHON3: - import bullseye_tests - test_modules.append(bullseye_tests) - - PlanoTestCommand(test_modules).main() +name: main +on: + push: + pull_request: + schedule: + - cron: "0 0 * * 0" +jobs: + test: + strategy: + fail-fast: false + matrix: + skupper-version: [latest, main] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: "3.x" + - uses: manusa/actions-setup-minikube@v2.10.0 + with: + minikube version: "v1.32.0" + kubernetes version: "v1.29.0" + github token: ${{secrets.GITHUB_TOKEN}} + - run: curl https://skupper.io/install.sh | bash -s -- --version ${{matrix.skupper-version}} + - run: echo "$HOME/.local/bin" >> "$GITHUB_PATH" + - run: ./plano test + env: + PLANO_COLOR: 1 diff --git a/subrepos/skewer/test-example/.gitignore b/external/skewer/config/.gitignore similarity index 50% rename from subrepos/skewer/test-example/.gitignore rename to external/skewer/config/.gitignore index 7bd2dc8..500983c 100644 --- a/subrepos/skewer/test-example/.gitignore +++ b/external/skewer/config/.gitignore @@ -1 
+1,2 @@ /README.html +__pycache__/ diff --git a/external/skewer/config/.plano.py b/external/skewer/config/.plano.py new file mode 100644 index 0000000..4609d49 --- /dev/null +++ b/external/skewer/config/.plano.py @@ -0,0 +1,20 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +from skewer.planocommands import * diff --git a/subrepos/skewer/.gitignore b/external/skewer/example/.gitignore similarity index 50% rename from subrepos/skewer/.gitignore rename to external/skewer/example/.gitignore index 3368b7b..500983c 100644 --- a/subrepos/skewer/.gitignore +++ b/external/skewer/example/.gitignore @@ -1,2 +1,2 @@ +/README.html __pycache__/ -README.html diff --git a/external/skewer/example/.plano.py b/external/skewer/example/.plano.py new file mode 100644 index 0000000..4609d49 --- /dev/null +++ b/external/skewer/example/.plano.py @@ -0,0 +1,20 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +from skewer.planocommands import * diff --git a/external/skewer/example/README.md b/external/skewer/example/README.md new file mode 100644 index 0000000..4da73ac --- /dev/null +++ b/external/skewer/example/README.md @@ -0,0 +1,357 @@ +# Skupper Hello World + +[![main](https://github.com/skupperproject/skewer/actions/workflows/main.yaml/badge.svg)](https://github.com/skupperproject/skewer/actions/workflows/main.yaml) + +#### A minimal HTTP application deployed across Kubernetes clusters using Skupper + +This example is part of a [suite of examples][examples] showing the +different ways you can use [Skupper][website] to connect services +across cloud providers, data centers, and edge sites. 
+ +[website]: https://skupper.io/ +[examples]: https://skupper.io/examples/index.html + +#### Contents + +* [Overview](#overview) +* [Prerequisites](#prerequisites) +* [Step 1: Install the Skupper command-line tool](#step-1-install-the-skupper-command-line-tool) +* [Step 2: Set up your namespaces](#step-2-set-up-your-namespaces) +* [Step 3: Deploy the frontend and backend](#step-3-deploy-the-frontend-and-backend) +* [Step 4: Create your sites](#step-4-create-your-sites) +* [Step 5: Link your sites](#step-5-link-your-sites) +* [Step 6: Fail on demand](#step-6-fail-on-demand) +* [Step 7: Expose the backend](#step-7-expose-the-backend) +* [Step 8: Access the frontend](#step-8-access-the-frontend) +* [Cleaning up](#cleaning-up) +* [Summary](#summary) +* [Next steps](#next-steps) +* [About this example](#about-this-example) + +## Overview + +An overview + +## Prerequisites + +Some prerequisites + +## Step 1: Install the Skupper command-line tool + +This example uses the Skupper command-line tool to deploy Skupper. +You need to install the `skupper` command only once for each +development environment. + +On Linux or Mac, you can use the install script (inspect it +[here][install-script]) to download and extract the command: + +~~~ shell +curl https://skupper.io/install.sh | sh +~~~ + +The script installs the command under your home directory. It +prompts you to add the command to your path if necessary. + +For Windows and other installation options, see [Installing +Skupper][install-docs]. + +[install-script]: https://github.com/skupperproject/skupper-website/blob/main/input/install.sh +[install-docs]: https://skupper.io/install/ + +## Step 2: Set up your namespaces + +Skupper is designed for use with multiple Kubernetes namespaces, +usually on different clusters. The `skupper` and `kubectl` +commands use your [kubeconfig][kubeconfig] and current context to +select the namespace where they operate. 
+ +[kubeconfig]: https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/ + +Your kubeconfig is stored in a file in your home directory. The +`skupper` and `kubectl` commands use the `KUBECONFIG` environment +variable to locate it. + +A single kubeconfig supports only one active context per user. +Since you will be using multiple contexts at once in this +exercise, you need to create distinct kubeconfigs. + +For each namespace, open a new terminal window. In each terminal, +set the `KUBECONFIG` environment variable to a different path and +log in to your cluster. Then create the namespace you wish to use +and set the namespace on your current context. + +**Note:** The login procedure varies by provider. See the +documentation for yours: + +* [Minikube](https://skupper.io/start/minikube.html#cluster-access) +* [Amazon Elastic Kubernetes Service (EKS)](https://skupper.io/start/eks.html#cluster-access) +* [Azure Kubernetes Service (AKS)](https://skupper.io/start/aks.html#cluster-access) +* [Google Kubernetes Engine (GKE)](https://skupper.io/start/gke.html#cluster-access) +* [IBM Kubernetes Service](https://skupper.io/start/ibmks.html#cluster-access) +* [OpenShift](https://skupper.io/start/openshift.html#cluster-access) + +_**West:**_ + +~~~ shell +export KUBECONFIG=~/.kube/config-west +# Enter your provider-specific login command +kubectl create namespace west +kubectl config set-context --current --namespace west +~~~ + +_**East:**_ + +~~~ shell +export KUBECONFIG=~/.kube/config-east +# Enter your provider-specific login command +kubectl create namespace east +kubectl config set-context --current --namespace east +~~~ + +## Step 3: Deploy the frontend and backend + +This example runs the frontend and the backend in separate +Kubernetes namespaces, on different clusters. + +Use `kubectl create deployment` to deploy the frontend in +namespace `west` and the backend in namespace +`east`. 
+ +_**West:**_ + +~~~ shell +kubectl create deployment frontend --image quay.io/skupper/hello-world-frontend +~~~ + +_**East:**_ + +~~~ shell +kubectl create deployment backend --image quay.io/skupper/hello-world-backend --replicas 3 +~~~ + +## Step 4: Create your sites + +A Skupper _site_ is a location where components of your +application are running. Sites are linked together to form a +network for your application. In Kubernetes, a site is associated +with a namespace. + +For each namespace, use `skupper init` to create a site. This +deploys the Skupper router and controller. Then use `skupper +status` to see the outcome. + +**Note:** If you are using Minikube, you need to [start minikube +tunnel][minikube-tunnel] before you run `skupper init`. + +[minikube-tunnel]: https://skupper.io/start/minikube.html#running-minikube-tunnel + +_**West:**_ + +~~~ shell +skupper init +skupper status +~~~ + +_Sample output:_ + +~~~ console +$ skupper init +Waiting for LoadBalancer IP or hostname... +Waiting for status... +Skupper is now installed in namespace 'west'. Use 'skupper status' to get more information. + +$ skupper status +Skupper is enabled for namespace "west". It is not connected to any other sites. It has no exposed services. +~~~ + +_**East:**_ + +~~~ shell +skupper init +skupper status +~~~ + +_Sample output:_ + +~~~ console +$ skupper init +Waiting for LoadBalancer IP or hostname... +Waiting for status... +Skupper is now installed in namespace 'east'. Use 'skupper status' to get more information. + +$ skupper status +Skupper is enabled for namespace "east". It is not connected to any other sites. It has no exposed services. +~~~ + +As you move through the steps below, you can use `skupper status` at +any time to check your progress. + +## Step 5: Link your sites + +A Skupper _link_ is a channel for communication between two sites. + +Creating a link requires use of two `skupper` commands in +conjunction, `skupper token create` and `skupper link create`. 
+ +The `skupper token create` command generates a secret token that +signifies permission to create a link. The token also carries the +link details. Then, in a remote site, The `skupper link +create` command uses the token to create a link to the site +that generated it. + +**Note:** The link token is truly a *secret*. Anyone who has the +token can link to your site. Make sure that only those you trust +have access to it. + +First, use `skupper token create` in site West to generate the +token. Then, use `skupper link create` in site East to link +the sites. + +_**West:**_ + +~~~ shell +skupper token create ~/secret.token +~~~ + +_Sample output:_ + +~~~ console +$ skupper token create ~/secret.token +Token written to ~/secret.token +~~~ + +_**East:**_ + +~~~ shell +skupper link create ~/secret.token +~~~ + +_Sample output:_ + +~~~ console +$ skupper link create ~/secret.token +Site configured to link to (name=link1) +Check the status of the link using 'skupper link status'. +~~~ + +If your terminal sessions are on different machines, you may need +to use `scp` or a similar tool to transfer the token securely. By +default, tokens expire after a single use or 15 minutes after +creation. + +## Step 6: Fail on demand + +_**West:**_ + +~~~ shell +if [ -n "${SKEWER_FAIL}" ]; then expr 1 / 0; fi + +~~~ + +## Step 7: Expose the backend + +We now have our sites linked to form a Skupper network, but no +services are exposed on it. Skupper uses the `skupper expose` +command to select a service from one site for exposure in all the +linked sites. + +Use `skupper expose` to expose the backend service in East to +the frontend in West. + +_**East:**_ + +~~~ shell +skupper expose deployment/backend --port 8080 +~~~ + +_Sample output:_ + +~~~ console +$ skupper expose deployment/backend --port 8080 +deployment backend exposed as backend +~~~ + +## Step 8: Access the frontend + +In order to use and test the application, we need external access +to the frontend. 
+ +Use `kubectl expose` with `--type LoadBalancer` to open network +access to the frontend service. + +Once the frontend is exposed, use `kubectl get service/frontend` +to look up the external IP of the frontend service. If the +external IP is ``, try again after a moment. + +Once you have the external IP, use `curl` or a similar tool to +request the `/api/health` endpoint at that address. + +**Note:** The `` field in the following commands is a +placeholder. The actual value is an IP address. + +_**West:**_ + +~~~ shell +kubectl expose deployment/frontend --port 8080 --type LoadBalancer +kubectl get service/frontend +curl http://:8080/api/health +~~~ + +_Sample output:_ + +~~~ console +$ kubectl expose deployment/frontend --port 8080 --type LoadBalancer +service/frontend exposed + +$ kubectl get service/frontend +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +frontend LoadBalancer 10.103.232.28 8080:30407/TCP 15s + +$ curl http://:8080/api/health +OK +~~~ + +If everything is in order, you can now access the web interface by +navigating to `http://:8080/` in your browser. + +## Cleaning up + +To remove Skupper and the other resources from this exercise, use +the following commands: + +_**West:**_ + +~~~ shell +skupper delete +kubectl delete service/frontend +kubectl delete deployment/frontend +~~~ + +_**East:**_ + +~~~ shell +skupper delete +kubectl delete deployment/backend +~~~ + +## Summary + +A summary + +## Next steps + +Some next steps + +## About this example + +This example was produced using [Skewer][skewer], a library for +documenting and testing Skupper examples. + +[skewer]: https://github.com/skupperproject/skewer + +Skewer provides utility functions for generating the README and +running the example steps. Use the `./plano` command in the project +root to see what is available. + +To quickly stand up the example using Minikube, try the `./plano demo` +command. 
diff --git a/subrepos/skewer/subrepos/plano/scripts/test b/external/skewer/example/plano similarity index 80% rename from subrepos/skewer/subrepos/plano/scripts/test rename to external/skewer/example/plano index 40a1897..476427d 100755 --- a/subrepos/skewer/subrepos/plano/scripts/test +++ b/external/skewer/example/plano @@ -18,14 +18,11 @@ # under the License. # -from plano import * +import sys -if __name__ == "__main__": - import plano_tests - test_modules = [plano_tests] +sys.path.insert(0, "python") - if PYTHON3: - import bullseye_tests - test_modules.append(bullseye_tests) +from plano import PlanoCommand - PlanoTestCommand(test_modules).main() +if __name__ == "__main__": + PlanoCommand().main() diff --git a/external/skewer/example/python/plano b/external/skewer/example/python/plano new file mode 120000 index 0000000..2366248 --- /dev/null +++ b/external/skewer/example/python/plano @@ -0,0 +1 @@ +../../python/plano \ No newline at end of file diff --git a/external/skewer/example/python/skewer b/external/skewer/example/python/skewer new file mode 120000 index 0000000..d33ad4b --- /dev/null +++ b/external/skewer/example/python/skewer @@ -0,0 +1 @@ +../../python/skewer \ No newline at end of file diff --git a/external/skewer/example/skewer.yaml b/external/skewer/example/skewer.yaml new file mode 100644 index 0000000..b9fe344 --- /dev/null +++ b/external/skewer/example/skewer.yaml @@ -0,0 +1,37 @@ +title: Skupper Hello World +subtitle: A minimal HTTP application deployed across Kubernetes clusters using Skupper +overview: | + An overview +prerequisites: | + Some prerequisites +sites: + west: + title: West + platform: kubernetes + namespace: west + env: + KUBECONFIG: ~/.kube/config-west + east: + title: East + platform: kubernetes + namespace: east + env: + KUBECONFIG: ~/.kube/config-east +steps: + - standard: install_the_skupper_command_line_tool + - standard: kubernetes/set_up_your_namespaces + - standard: hello_world/deploy_the_frontend_and_backend + - standard: 
kubernetes/create_your_sites + - standard: kubernetes/link_your_sites + - title: Fail on demand + commands: + west: + - run: | + if [ -n "${SKEWER_FAIL}" ]; then expr 1 / 0; fi + - standard: hello_world/expose_the_backend + - standard: hello_world/access_the_frontend + - standard: hello_world/cleaning_up +summary: | + A summary +next_steps: | + Some next steps diff --git a/external/skewer/external/plano/.github/workflows/main.yaml b/external/skewer/external/plano/.github/workflows/main.yaml new file mode 100644 index 0000000..83ba30d --- /dev/null +++ b/external/skewer/external/plano/.github/workflows/main.yaml @@ -0,0 +1,48 @@ +name: main +on: + push: + pull_request: + schedule: + - cron: "0 0 * * 0" +jobs: + main: + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest, windows-latest] + version: [3.7, 3.x] + runs-on: ${{matrix.os}} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + python-version: ${{matrix.version}} + - run: pip install build wheel + - run: python -m build + - run: pip install dist/ssorj_plano-1.0.0-py3-none-any.whl + - run: plano-self-test + cygwin: + runs-on: windows-latest + steps: + - run: git config --global core.autocrlf input + - uses: actions/checkout@v3 + - uses: cygwin/cygwin-install-action@master + with: + packages: python3 + - run: pip install build wheel + shell: C:\cygwin\bin\bash.exe -o igncr '{0}' + - run: make install + shell: C:\cygwin\bin\bash.exe -o igncr '{0}' + - run: echo "C:\Users\runneradmin\AppData\Roaming\Python\Python39\Scripts" >> "$GITHUB_PATH" + shell: C:\cygwin\bin\bash.exe -o igncr '{0}' + - run: plano-self-test + shell: C:\cygwin\bin\bash.exe -o igncr '{0}' + fedora: + runs-on: ubuntu-latest + container: fedora:latest + steps: + - uses: actions/checkout@v3 + - run: dnf -y install make pip python python-build python-wheel + - run: make install + - run: echo "$HOME/.local/bin" >> "$GITHUB_PATH" + - run: plano-self-test diff --git 
a/subrepos/skewer/subrepos/plano/.gitignore b/external/skewer/external/plano/.gitignore similarity index 64% rename from subrepos/skewer/subrepos/plano/.gitignore rename to external/skewer/external/plano/.gitignore index 8b940c3..3af00c3 100644 --- a/subrepos/skewer/subrepos/plano/.gitignore +++ b/external/skewer/external/plano/.gitignore @@ -1,7 +1,6 @@ -*.pyc __pycache__/ +*.egg-info/ /build /dist /.coverage /htmlcov -test-project/build diff --git a/subrepos/skewer/subrepos/plano/LICENSE.txt b/external/skewer/external/plano/LICENSE.txt similarity index 100% rename from subrepos/skewer/subrepos/plano/LICENSE.txt rename to external/skewer/external/plano/LICENSE.txt diff --git a/external/skewer/external/plano/MANIFEST.in b/external/skewer/external/plano/MANIFEST.in new file mode 100644 index 0000000..778ca32 --- /dev/null +++ b/external/skewer/external/plano/MANIFEST.in @@ -0,0 +1 @@ +include src/plano/_testproject/* diff --git a/external/skewer/external/plano/Makefile b/external/skewer/external/plano/Makefile new file mode 100644 index 0000000..28212e5 --- /dev/null +++ b/external/skewer/external/plano/Makefile @@ -0,0 +1,70 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +.NOTPARALLEL: + +# A workaround for an install-with-prefix problem in Fedora 36 +# +# https://docs.fedoraproject.org/en-US/fedora/latest/release-notes/developers/Development_Python/#_pipsetup_py_installation_with_prefix +# https://bugzilla.redhat.com/show_bug.cgi?id=2026979 + +export RPM_BUILD_ROOT := fake + +.PHONY: build +build: + python -m build + +.PHONY: test +test: clean build + python -m venv build/venv + . build/venv/bin/activate && pip install --force-reinstall dist/ssorj_plano-*-py3-none-any.whl + . build/venv/bin/activate && plano-self-test + +.PHONY: qtest +qtest: + PYTHONPATH=src python -m plano._tests + +.PHONY: install +install: build + pip install --user --force-reinstall dist/ssorj_plano-*-py3-none-any.whl + +.PHONY: clean +clean: + rm -rf build dist htmlcov .coverage src/plano/__pycache__ src/plano.egg-info + +.PHONY: docs +docs: + mkdir -p build + sphinx-build -M html docs build/docs + +# XXX Watch out: The 3.11 in this is environment dependent +.PHONY: coverage +coverage: build + python -m venv build/venv + . build/venv/bin/activate && pip install --force-reinstall dist/ssorj_plano-*-py3-none-any.whl + . build/venv/bin/activate && PYTHONPATH=build/venv/lib/python3.11/site-packages coverage run \ + --include build/venv/lib/python\*/site-packages/plano/\*,build/venv/bin/\* \ + build/venv/bin/plano-self-test + coverage report + coverage html + @echo "OUTPUT: file:${CURDIR}/htmlcov/index.html" + +.PHONY: upload +upload: build + twine upload --repository testpypi dist/* diff --git a/external/skewer/external/plano/README.md b/external/skewer/external/plano/README.md new file mode 100644 index 0000000..fb4b1c2 --- /dev/null +++ b/external/skewer/external/plano/README.md @@ -0,0 +1,90 @@ +# Plano + +[![main](https://github.com/ssorj/plano/workflows/main/badge.svg)](https://github.com/ssorj/plano/actions?query=workflow%3Amain) + +Python functions for writing shell-style system scripts. 
+ +## Installation + +To install plano globally for the current user: + +~~~ +make install +~~~ + +## A self-contained command with subcommands + +`~/.local/bin/widget`: +~~~ python +#!/usr/bin/python + +import sys +from plano import * + +@command +def greeting(message="Howdy"): + print(message) + +if __name__ == "__main__": + PlanoCommand(sys.modules[__name__]).main() +~~~ + +~~~ shell +$ widget greeting --message Hello +--> greeting +Hello +<-- greeting +OK (0s) +~~~ + +## A self-contained test command + +`~/.local/bin/widget-test`: +~~~ python +import sys +from plano import * + +@test +def check(): + run("widget greeting --message Yo") + +if __name__ == "__main__": + PlanoTestCommand(sys.modules[__name__]).main() +~~~ + +~~~ shell +$ widget-test +=== Configuration === +Modules: __main__ +Test timeout: 5m +Fail fast: False + +=== Module '__main__' === +check ........................................................... PASSED 0.0s + +=== Summary === +Total: 1 +Skipped: 0 +Failed: 0 + +=== RESULT === +All tests passed +~~~ + +## Programmatic test definition + +~~~ python +from plano import * + +def test_widget(message): + run(f"widget greeting --message {message}") + +for message in "hi", "lo", "in between": + add_test(f"message-{message}", test_widget, message) +~~~ + +## Things to know + +* The plano command accepts command sequences in the form "this,that" + (no spaces). The command arguments are applied to the last command + only. diff --git a/subrepos/skewer/subrepos/plano/bin/plano b/external/skewer/external/plano/bin/plano similarity index 79% rename from subrepos/skewer/subrepos/plano/bin/plano rename to external/skewer/external/plano/bin/plano index b5987ea..476427d 100755 --- a/subrepos/skewer/subrepos/plano/bin/plano +++ b/external/skewer/external/plano/bin/plano @@ -18,15 +18,9 @@ # under the License. 
# -import os import sys -if os.path.islink(__file__): - source_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) - sys.path.insert(0, os.path.join(source_dir, "python")) - -if os.path.isdir("python"): - sys.path.insert(0, "python") +sys.path.insert(0, "python") from plano import PlanoCommand diff --git a/subrepos/skewer/subrepos/plano/bin/planotest b/external/skewer/external/plano/bin/plano-test similarity index 79% rename from subrepos/skewer/subrepos/plano/bin/planotest rename to external/skewer/external/plano/bin/plano-test index 122d34d..f92ad34 100755 --- a/subrepos/skewer/subrepos/plano/bin/planotest +++ b/external/skewer/external/plano/bin/plano-test @@ -18,15 +18,9 @@ # under the License. # -import os import sys -if os.path.islink(__file__): - source_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) - sys.path.insert(0, os.path.join(source_dir, "python")) - -if os.path.isdir("python"): - sys.path.insert(0, "python") +sys.path.insert(0, "python") from plano import PlanoTestCommand diff --git a/subrepos/skewer/subrepos/plano/docs/conf.py b/external/skewer/external/plano/docs/conf.py similarity index 100% rename from subrepos/skewer/subrepos/plano/docs/conf.py rename to external/skewer/external/plano/docs/conf.py diff --git a/subrepos/skewer/subrepos/plano/docs/index.rst b/external/skewer/external/plano/docs/index.rst similarity index 100% rename from subrepos/skewer/subrepos/plano/docs/index.rst rename to external/skewer/external/plano/docs/index.rst diff --git a/external/skewer/external/plano/pyproject.toml b/external/skewer/external/plano/pyproject.toml new file mode 100644 index 0000000..a682141 --- /dev/null +++ b/external/skewer/external/plano/pyproject.toml @@ -0,0 +1,23 @@ +[build-system] +requires = [ "setuptools", "setuptools-scm" ] +build-backend = "setuptools.build_meta" + +[project] +name = "ssorj-plano" +version = "1.0.0" +authors = [ { name = "Justin Ross", email = "jross@apache.org" } ] +description = "Python 
functions for writing shell-style system scripts" +license = { file = "LICENSE.txt" } +readme = "README.md" +classifiers = [ "License :: OSI Approved :: Apache Software License" ] +requires-python = ">=3.7" +dependencies = [ "PyYAML" ] + +[project.scripts] +plano = "plano.command:_main" +plano-test = "plano.test:_main" +plano-self-test = "plano._tests:main" + +[project.urls] +"Homepage" = "https://github.com/ssorj/plano" +"Bug Tracker" = "https://github.com/ssorj/plano/issues" diff --git a/subrepos/skewer/subrepos/plano/scripts/test-ubuntu.dockerfile b/external/skewer/external/plano/src/plano/__init__.py similarity index 77% rename from subrepos/skewer/subrepos/plano/scripts/test-ubuntu.dockerfile rename to external/skewer/external/plano/src/plano/__init__.py index 6fc1f02..3218323 100644 --- a/subrepos/skewer/subrepos/plano/scripts/test-ubuntu.dockerfile +++ b/external/skewer/external/plano/src/plano/__init__.py @@ -17,12 +17,8 @@ # under the License. # -FROM ubuntu +from .main import * +from .main import _default_sigterm_handler -RUN apt-get update -qq && apt-get upgrade -y -qq - -RUN apt-get -y install curl make python3 python3-distutils python3-yaml - -COPY . /root/plano -WORKDIR /root/plano -CMD ["make", "test", "install", "PREFIX=/usr/local"] +from .command import * +from .test import * diff --git a/external/skewer/external/plano/src/plano/_testproject/.plano.py b/external/skewer/external/plano/src/plano/_testproject/.plano.py new file mode 100644 index 0000000..8cda2e7 --- /dev/null +++ b/external/skewer/external/plano/src/plano/_testproject/.plano.py @@ -0,0 +1,112 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +from plano import * + +@command +def base_command(alpha, beta, omega="x"): + """ + Base command help + """ + + print("base", alpha, beta, omega) + +@command(name="extended-command", parent=base_command) +def extended_command(alpha, beta, omega="y"): + print("extended", alpha, omega) + parent(alpha, beta, omega) + +@command(parameters=[CommandParameter("message_", help="The message to print", display_name="message"), + CommandParameter("count", help="Print the message COUNT times"), + CommandParameter("extra", default=1, short_option="e")]) +def echo(message_, count=1, extra=None, trouble=False, verbose=False): + """ + Print a message to the console + """ + + print("Echoing (message={}, count={})".format(message_, count)) + + if trouble: + raise Exception("Trouble") + + for i in range(count): + print(message_) + +@command +def echoecho(message): + echo(message) + +@command +def haberdash(first, *middle, last="bowler"): + """ + Habberdash command help + """ + + data = [first, *middle, last] + write_json("haberdash.json", data) + +@command(parameters=[CommandParameter("optional", positional=True)]) +def balderdash(required, optional="malarkey", other="rubbish", **extra_kwargs): + """ + Balderdash command help + """ + + data = [required, optional, other] + write_json("balderdash.json", data) + +@command +def splasher(): + write_json("splasher.json", [1]) + +@command +def dasher(alpha, beta=123): + pass + +@command(passthrough=True) +def dancer(gamma, omega="abc", passthrough_args=[]): + write_json("dancer.json", passthrough_args) + +# Vixen's parent calls 
prancer. We are testing to ensure the extended +# prancer (below) is executed. + +from plano._tests import prancer, vixen + +@command(parent=prancer) +def prancer(): + parent() + + notice("Extended prancer") + + write_json("prancer.json", True) + +@command(parent=vixen) +def vixen(): + parent() + +@command +def no_parent(): + parent() + +@command(parameters=[CommandParameter("spinach")]) +def feta(*args, **kwargs): + write_json("feta.json", kwargs["spinach"]) + +@command(hidden=True) +def invisible(something="nothing"): + write_json("invisible.json", something) diff --git a/subrepos/skewer/subrepos/plano/test-project/bin/chucker-test b/external/skewer/external/plano/src/plano/_testproject/src/chucker/__init__.py similarity index 100% rename from subrepos/skewer/subrepos/plano/test-project/bin/chucker-test rename to external/skewer/external/plano/src/plano/_testproject/src/chucker/__init__.py diff --git a/subrepos/skewer/subrepos/plano/scripts/test-centos-8.dockerfile b/external/skewer/external/plano/src/plano/_testproject/src/chucker/moretests.py similarity index 77% rename from subrepos/skewer/subrepos/plano/scripts/test-centos-8.dockerfile rename to external/skewer/external/plano/src/plano/_testproject/src/chucker/moretests.py index 1005404..2607880 100644 --- a/subrepos/skewer/subrepos/plano/scripts/test-centos-8.dockerfile +++ b/external/skewer/external/plano/src/plano/_testproject/src/chucker/moretests.py @@ -17,12 +17,8 @@ # under the License. # -FROM centos:stream8 +from plano import * -RUN dnf -qy update && dnf -q clean all - -RUN dnf -y install make python2 python2-pyyaml python3 python3-pyyaml - -COPY . 
/root/plano -WORKDIR /root/plano -CMD ["make", "clean", "test", "install", "PREFIX=/usr/local"] +@test +def hello_again(): + print("Hello again") diff --git a/external/skewer/external/plano/src/plano/_testproject/src/chucker/tests.py b/external/skewer/external/plano/src/plano/_testproject/src/chucker/tests.py new file mode 100644 index 0000000..4e0cec1 --- /dev/null +++ b/external/skewer/external/plano/src/plano/_testproject/src/chucker/tests.py @@ -0,0 +1,70 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +from plano import * + +@test +def hello(): + print("Hello") + +@test +async def hello_async(): + print("Hello") + +@test +def goodbye(): + print("Goodbye") + +@test(disabled=True) +def badbye(): + print("Badbye") + assert False + +@test(disabled=True) +def skipped(): + skip_test("Skipped") + assert False + +@test(disabled=True) +def keyboard_interrupt(): + raise KeyboardInterrupt() + +@test(disabled=True, timeout=0.05) +def timeout(): + sleep(10, quiet=True) + assert False + +@test(disabled=True) +def process_error(): + run("expr 1 / 0") + +@test(disabled=True) +def system_exit_(): + exit(1) + +def test_widget(message): + print(message) + +for message in "hi", "lo", "in between": + add_test(f"message-{message}", test_widget, message) + +@test(disabled=True) +def badbye2(): + print("Badbye 2") + assert False diff --git a/subrepos/skewer/subrepos/plano/python/plano_tests.py b/external/skewer/external/plano/src/plano/_tests.py similarity index 70% rename from subrepos/skewer/subrepos/plano/python/plano_tests.py rename to external/skewer/external/plano/src/plano/_tests.py index 7d2595b..dd91212 100644 --- a/subrepos/skewer/subrepos/plano/python/plano_tests.py +++ b/external/skewer/external/plano/src/plano/_tests.py @@ -17,21 +17,24 @@ # under the License. 
# +import datetime as _datetime +import getpass as _getpass import os as _os -import pwd as _pwd import signal as _signal import socket as _socket import sys as _sys import threading as _threading +from .github import * + try: import http.server as _http except ImportError: # pragma: nocover import BaseHTTPServer as _http -from plano import * +from .test import * -test_project_dir = join(get_parent_dir(get_parent_dir(__file__)), "test-project") +test_project_dir = join(get_parent_dir(__file__), "_testproject") class test_project(working_dir): def __enter__(self): @@ -48,46 +51,52 @@ def archive_operations(): touch("some-dir/some-file") make_archive("some-dir") - assert is_file("some-dir.tar.gz") + assert is_file("some-dir.tar.gz"), list_dir() extract_archive("some-dir.tar.gz", output_dir="some-subdir") - assert is_dir("some-subdir/some-dir") - assert is_file("some-subdir/some-dir/some-file") + assert is_dir("some-subdir/some-dir"), list_dir("some-subdir") + assert is_file("some-subdir/some-dir/some-file"), list_dir("some-subdir/some-dir") rename_archive("some-dir.tar.gz", "something-else") - assert is_file("something-else.tar.gz") + assert is_file("something-else.tar.gz"), list_dir() extract_archive("something-else.tar.gz") - assert is_dir("something-else") - assert is_file("something-else/some-file") + assert is_dir("something-else"), list_dir() + assert is_file("something-else/some-file"), list_dir("something-else") @test def command_operations(): class SomeCommand(BaseCommand): def __init__(self): + super().__init__() + self.parser = BaseArgumentParser() self.parser.add_argument("--interrupt", action="store_true") self.parser.add_argument("--explode", action="store_true") + self.parser.add_argument("--verbose", action="store_true") + self.parser.add_argument("--quiet", action="store_true") def parse_args(self, args): return self.parser.parse_args(args) def init(self, args): - self.verbose = args.verbose self.interrupt = args.interrupt self.explode = args.explode 
+ self.verbose = args.verbose + self.quiet = args.quiet def run(self): - if self.verbose: - print("Hello") - if self.interrupt: raise KeyboardInterrupt() if self.explode: raise PlanoError("Exploded") + if self.verbose: + print("Hello") + SomeCommand().main([]) + SomeCommand().main(["--verbose"]) SomeCommand().main(["--interrupt"]) with expect_system_exit(): @@ -134,10 +143,14 @@ def dir_operations(): result = list_dir() assert result == [], result + print_dir() + print_dir(test_dir) + print_dir(test_dir, "*.not-there") + result = find(test_dir) assert result == [test_file_1, test_file_2], (result, [test_file_1, test_file_2]) - result = find(test_dir, "*-file-1") + result = find(test_dir, include="*-file-1") assert result == [test_file_1], (result, [test_file_1]) result = find(test_dir, exclude="*-file-1") @@ -147,6 +160,11 @@ def dir_operations(): result = find() assert result == [], result + make_dir("subdir") + + result = find("./subdir") + assert result == [], result + with working_dir(): with working_dir("a-dir", quiet=True): touch("a-file") @@ -162,7 +180,7 @@ def dir_operations(): @test def env_operations(): result = join_path_var("a", "b", "c", "a") - assert result == "a:b:c", result + assert result == _os.pathsep.join(("a", "b", "c")), result curr_dir = get_current_dir() @@ -170,12 +188,12 @@ def env_operations(): assert get_current_dir() == curr_dir, (get_current_dir(), curr_dir) result = get_home_dir() - assert result == ENV["HOME"], result + assert result == _os.path.expanduser("~"), (result, _os.path.expanduser("~")) result = get_home_dir("alice") assert result.endswith("alice"), result - user = _pwd.getpwuid(_os.getuid())[0] + user = _getpass.getuser() result = get_user() assert result == user, (result, user) @@ -220,6 +238,8 @@ def env_operations(): with open(out, "w") as f: print_env(file=f) + print_stack() + @test def file_operations(): with working_dir(): @@ -298,10 +318,48 @@ def file_operations(): result = get_file_size(file) assert result == 10, 
result + zeta_dir = make_dir("zeta-dir") + zeta_file = touch(join(zeta_dir, "zeta-file")) + + eta_dir = make_dir("eta-dir") + eta_file = touch(join(eta_dir, "eta-file")) + + replace(zeta_dir, eta_dir) + assert not exists(zeta_file) + assert exists(zeta_dir) + assert is_file(join(zeta_dir, "eta-file")) + + with expect_exception(): + replace(zeta_dir, "not-there") + + assert exists(zeta_dir) + assert is_file(join(zeta_dir, "eta-file")) + + theta_file = write("theta-file", "theta") + iota_file = write("iota-file", "iota") + + replace(theta_file, iota_file) + assert not exists(iota_file) + assert read(theta_file) == "iota" + +@test +def github_operations(): + result = convert_github_markdown("# Hello, Fritz") + assert "Hello, Fritz" in result, result + + with working_dir(): + update_external_from_github("temp", "ssorj", "plano") + assert is_file("temp/Makefile"), list_dir("temp") + @test def http_operations(): class Handler(_http.BaseHTTPRequestHandler): def do_GET(self): + if not self.path.startswith("/api"): + self.send_response(404) + self.end_headers() + return + self.send_response(200) self.end_headers() self.wfile.write(b"[1]") @@ -330,10 +388,17 @@ def run(self): self.server.serve_forever() host, port = "localhost", get_random_port() - url = "http://{0}:{1}".format(host, port) - server = _http.HTTPServer((host, port), Handler) - server_thread = ServerThread(server) + url = "http://{}:{}/api".format(host, port) + missing_url = "http://{}:{}/nono".format(host, port) + try: + server = _http.HTTPServer((host, port), Handler) + except (OSError, PermissionError): # pragma: nocover + # Try one more time + port = get_random_port() + server = _http.HTTPServer((host, port), Handler) + + server_thread = ServerThread(server) server_thread.start() try: @@ -341,9 +406,15 @@ def run(self): result = http_get(url) assert result == "[1]", result + with expect_error(): + http_get(missing_url) + result = http_get(url, insecure=True) assert result == "[1]", result + result = 
http_get(url, user="fritz", password="secret") + assert result == "[1]", result + result = http_get(url, output_file="a") output = read("a") assert result is None, result @@ -409,6 +480,9 @@ def io_operations(): "alpha\n", "beta\n", "gamma\n", + "chi\n", + "psi\n", + "omega\n", ] file_b = write_lines("b", input_lines) @@ -417,7 +491,7 @@ def io_operations(): assert input_lines == output_lines, (input_lines, output_lines) pre_lines = ["pre-alpha\n"] - post_lines = ["post-gamma\n"] + post_lines = ["post-omega\n"] prepend_lines(file_b, pre_lines) append_lines(file_b, post_lines) @@ -426,21 +500,21 @@ def io_operations(): tailed_lines = tail_lines(file_b, 1) assert output_lines[0] == pre_lines[0], (output_lines[0], pre_lines[0]) - assert output_lines[4] == post_lines[0], (output_lines[4], post_lines[0]) + assert output_lines[-1] == post_lines[0], (output_lines[-1], post_lines[0]) assert tailed_lines[0] == post_lines[0], (tailed_lines[0], post_lines[0]) file_c = touch("c") assert is_file(file_c), file_c file_d = write("d", "front@middle@@middle@back") - replace_in_file(file_d, "@middle@", "M", count=1) - result = read(file_d) + path = string_replace_file(file_d, "@middle@", "M", count=1) + result = read(path) assert result == "frontM@middle@back", result file_e = write("e", "123") file_f = write("f", "456") - concatenate("g", (file_e, "not-there", file_f)) - result = read("g") + path = concatenate("g", (file_e, "not-there", file_f)) + result = read(path) assert result == "123456", result @test @@ -473,6 +547,10 @@ def json_operations(): assert input_data == parsed_data, (input_data, parsed_data) assert json == emitted_json, (json, emitted_json) + with expect_output(equals=emitted_json) as out: + with open(out, "w") as f: + print_json(input_data, file=f, end="") + @test def link_operations(): with working_dir(): @@ -482,16 +560,16 @@ def link_operations(): with working_dir("another-dir"): link = make_link("a-link", path) linked_path = read_link(link) - assert linked_path 
== path, (linked_path, path) + assert linked_path.endswith(path), (linked_path, path) @test def logging_operations(): error("Error!") - warn("Warning!") + warning("Warning!") notice("Take a look!") notice(123) debug("By the way") - debug("abc{0}{1}{2}", 1, 2, 3) + debug("abc{}{}{}", 1, 2, 3) with expect_exception(RuntimeError): fail(RuntimeError("Error!")) @@ -499,7 +577,10 @@ def logging_operations(): with expect_error(): fail("Error!") - for level in ("debug", "notice", "warn", "error"): + with expect_error(): + fail("Error! {}", "Let me elaborate") + + for level in ("debug", "notice", "warning", "error"): with expect_output(contains="Hello") as out: with logging_disabled(): with logging_enabled(level=level, output=out): @@ -510,19 +591,34 @@ def logging_operations(): with logging_disabled(): error("Yikes") + with expect_output(contains="flipper") as out: + with logging_enabled(output=out): + with logging_context("flipper"): + notice("Whhat") + + with logging_context("bip"): + with logging_context("boop"): + error("It's alarming!") + @test def path_operations(): + abspath = _os.path.abspath + normpath = _os.path.normpath + with working_dir("/"): - curr_dir = get_current_dir() - assert curr_dir == "/", curr_dir + result = get_current_dir() + expect = abspath(_os.sep) + assert result == expect, (result, expect) path = "a/b/c" result = get_absolute_path(path) - assert result == join(curr_dir, path), result + expect = join(get_current_dir(), path) + assert result == expect, (result, expect) path = "/x/y/z" result = get_absolute_path(path) - assert result == path, result + expect = abspath(path) + assert result == expect, (result, expect) path = "/x/y/z" assert is_absolute(path) @@ -532,23 +628,28 @@ def path_operations(): path = "a//b/../c/" result = normalize_path(path) - assert result == "a/c", result + expect = normpath("a/c") + assert result == expect, (result, expect) path = "/a/../c" result = get_real_path(path) - assert result == "/c", result + expect = 
abspath("/c") + assert result == expect, (result, expect) - path = "/a/b" + path = abspath("/a/b") result = get_relative_path(path, "/a/c") - assert result == "../b", result + expect = normpath("../b") + assert result == expect, (result, expect) - path = "/a/b" + path = abspath("/a/b") result = get_file_url(path) - assert result == "file:/a/b", result + expect = "file:{}".format(path) + assert result == expect, (result, expect) with working_dir(): result = get_file_url("afile") - assert result == "file:{0}/afile".format(get_current_dir()), result + expect = join(get_file_url(get_current_dir()), "afile") + assert result == expect, (result, expect) path = "/alpha/beta.ext" path_split = "/alpha", "beta.ext" @@ -556,28 +657,36 @@ def path_operations(): name_split_extension = "beta", ".ext" result = join(*path_split) - assert result == path, result + expect = normpath(path) + assert result == expect, (result, expect) result = split(path) - assert result == path_split, result + expect = normpath(path_split[0]), normpath(path_split[1]) + assert result == expect, (result, expect) result = split_extension(path) - assert result == path_split_extension, result + expect = normpath(path_split_extension[0]), normpath(path_split_extension[1]) + assert result == expect, (result, expect) result = get_parent_dir(path) - assert result == path_split[0], result + expect = normpath(path_split[0]) + assert result == expect, (result, expect) result = get_base_name(path) - assert result == path_split[1], result + expect = normpath(path_split[1]) + assert result == expect, (result, expect) result = get_name_stem(path) - assert result == name_split_extension[0], result + expect = normpath(name_split_extension[0]) + assert result == expect, (result, expect) result = get_name_stem("alpha.tar.gz") - assert result == "alpha", result + expect = "alpha" + assert result == expect, (result, expect) result = get_name_extension(path) - assert result == name_split_extension[1], result + expect = 
normpath(name_split_extension[1]) + assert result == expect, (result, expect) with working_dir(): touch("adir/afile") @@ -604,8 +713,9 @@ def path_operations(): await_exists("adir/afile") - with expect_timeout(): - await_exists("adir/notafile", timeout=TINY_INTERVAL) + if not WINDOWS: + with expect_timeout(): + await_exists("adir/notafile", timeout=TINY_INTERVAL) @test def port_operations(): @@ -616,7 +726,13 @@ def port_operations(): server_socket = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM) try: - server_socket.bind(("localhost", server_port)) + try: + server_socket.bind(("localhost", server_port)) + except (OSError, PermissionError): # pragma: nocover + # Try one more time + server_port = get_random_port() + server_socket.bind(("localhost", server_port)) + server_socket.listen(5) await_port(server_port) @@ -624,13 +740,17 @@ def port_operations(): check_port(server_port) - with expect_error(): - get_random_port(min=server_port, max=server_port) + # Non-Linux platforms don't seem to produce the expected + # error. 
+ if LINUX: + with expect_error(): + get_random_port(min=server_port, max=server_port) finally: server_socket.close() - with expect_timeout(): - await_port(get_random_port(), timeout=TINY_INTERVAL) + if not WINDOWS: + with expect_timeout(): + await_port(get_random_port(), timeout=TINY_INTERVAL) @test def process_operations(): @@ -644,6 +764,9 @@ def process_operations(): run("date", stash=True) + run(["echo", 1, 2, 3]) + run(["echo", 1, 2, 3], shell=True) + proc = run(["echo", "hello"], check=False) assert proc.exit_code == 0, proc.exit_code @@ -668,18 +791,20 @@ def process_operations(): with expect_error(): run("cat /whoa/not/really", stash=True) - result = call("echo hello") - assert result == "hello\n", result + result = call("echo hello").strip() + expect = "hello" + assert result == expect, (result, expect) - result = call("echo hello | cat", shell=True) - assert result == "hello\n", result + result = call("echo hello | cat", shell=True).strip() + expect = "hello" + assert result == expect, (result, expect) with expect_error(): call("cat /whoa/not/really") - if PYTHON3: - proc = start("sleep 10") + proc = start("sleep 10") + if not WINDOWS: with expect_timeout(): wait(proc, timeout=TINY_INTERVAL) @@ -740,10 +865,10 @@ def process_operations(): @test def string_operations(): - result = replace("ab", "a", "b") + result = string_replace("ab", "a", "b") assert result == "bb", result - result = replace("aba", "a", "b", count=1) + result = string_replace("aba", "a", "b", count=1) assert result == "bba", result result = remove_prefix(None, "xxx") @@ -817,6 +942,9 @@ def string_operations(): decoded_result = url_decode(encoded_result) assert decoded_result == "abc=123&yeah!", decoded_result + result = parse_url("http://example.net/index.html") + assert result.hostname == "example.net" + @test def temp_operations(): system_temp_dir = get_system_temp_dir() @@ -853,44 +981,54 @@ def temp_operations(): @test def test_operations(): with test_project(): - with 
working_module_path("python"): + with working_module_path("src"): import chucker - import chucker_tests + import chucker.tests + import chucker.moretests - print_tests(chucker_tests) + print_tests(chucker.tests) for verbose in (False, True): - run_tests(chucker_tests, verbose=verbose) - run_tests(chucker_tests, exclude="*hello*", verbose=verbose) - + # Module 'chucker' has no tests with expect_error(): run_tests(chucker, verbose=verbose) + run_tests(chucker.tests, verbose=verbose) + run_tests(chucker.tests, exclude="*hello*", verbose=verbose) + run_tests(chucker.tests, enable="skipped", verbose=verbose) + + with expect_error(): + run_tests(chucker.tests, enable="skipped", unskip="*skipped*", verbose=verbose) + with expect_error(): - run_tests(chucker_tests, enable="*badbye*", verbose=verbose) + run_tests(chucker.tests, enable="*badbye*", verbose=verbose) with expect_error(): - run_tests(chucker_tests, enable="*badbye*", fail_fast=True, verbose=verbose) + run_tests(chucker.tests, enable="*badbye*", fail_fast=True, verbose=verbose) + + with expect_error(): + run_tests([chucker.tests, chucker.moretests], enable="*badbye2*", fail_fast=True, verbose=verbose) with expect_exception(KeyboardInterrupt): - run_tests(chucker_tests, enable="test_keyboard_interrupt", verbose=verbose) + run_tests(chucker.tests, enable="keyboard-interrupt", verbose=verbose) with expect_error(): - run_tests(chucker_tests, enable="test_timeout", verbose=verbose) + run_tests(chucker.tests, enable="timeout", verbose=verbose) with expect_error(): - run_tests(chucker_tests, enable="test_process_error", verbose=verbose) + run_tests(chucker.tests, enable="process-error", verbose=verbose) with expect_error(): - run_tests(chucker_tests, enable="test_system_exit", verbose=verbose) + run_tests(chucker.tests, enable="system-exit", verbose=verbose) with expect_system_exit(): PlanoTestCommand().main(["--module", "nosuchmodule"]) def run_command(*args): - PlanoTestCommand(chucker_tests).main(args) + 
PlanoTestCommand(chucker.tests).main(args) run_command("--verbose") + run_command("--quiet") run_command("--list") with expect_system_exit(): @@ -917,10 +1055,42 @@ def time_operations(): assert get_time() - start_time > TINY_INTERVAL - with expect_system_exit(): - with start("sleep 10"): - from plano import _default_sigterm_handler - _default_sigterm_handler(_signal.SIGTERM, None) + start_datetime = get_datetime() + + sleep(TINY_INTERVAL) + + assert get_datetime() - start_datetime > _datetime.timedelta(seconds=TINY_INTERVAL) + + timestamp = format_timestamp() + result = parse_timestamp(timestamp) + assert format_timestamp(result) == timestamp + + result = parse_timestamp(None) + assert result is None + + earlier = get_datetime() + result = format_date() + later = _datetime.datetime.strptime(result, "%d %B %Y") + later = later.replace(tzinfo=_datetime.timezone.utc) + assert later - earlier < _datetime.timedelta(days=1) + + now = get_datetime() + result = format_date(now) + assert result == f"{now.day} {now.strftime('%B')} {now.strftime('%Y')}" + + now = get_datetime() + result = format_time() + later = _datetime.datetime.strptime(result, "%H:%M:%S") + later = later.replace(tzinfo=_datetime.timezone.utc) + assert later - earlier < _datetime.timedelta(seconds=1) + + now = get_datetime() + result = format_time(now) + assert result == f"{now.hour}:{now.strftime('%M')}:{now.strftime('%S')}" + + now = get_datetime() + result = format_time(now, precision="minute") + assert result == f"{now.hour}:{now.strftime('%M')}" result = format_duration(0.1) assert result == "0.1s", result @@ -937,15 +1107,21 @@ def time_operations(): result = format_duration(3600) assert result == "1h", result + with expect_system_exit(): + with start("sleep 10"): + from plano import _default_sigterm_handler + _default_sigterm_handler(_signal.SIGTERM, None) + with Timer() as timer: sleep(TINY_INTERVAL) assert timer.elapsed_time > TINY_INTERVAL assert timer.elapsed_time > TINY_INTERVAL - with 
expect_timeout(): - with Timer(timeout=TINY_INTERVAL) as timer: - sleep(10) + if not WINDOWS: + with expect_timeout(): + with Timer(timeout=TINY_INTERVAL) as timer: + sleep(10) @test def unique_id_operations(): @@ -986,10 +1162,10 @@ def value_operations(): result = format_empty((1,), "[nothing]") assert result == (1,), result - result = format_not_empty("abc", "[{0}]") + result = format_not_empty("abc", "[{}]") assert result == "[abc]", result - result = format_not_empty({}, "[{0}]") + result = format_not_empty({}, "[{}]") assert result == {}, result result = format_repr(Namespace(a=1, b=2), limit=1) @@ -1009,7 +1185,7 @@ def value_operations(): def yaml_operations(): try: import yaml as _yaml - except ImportError: + except ImportError: # pragma: nocover raise PlanoTestSkipped("PyYAML is not available") with working_dir(): @@ -1029,23 +1205,36 @@ def yaml_operations(): assert input_data == parsed_data, (input_data, parsed_data) assert yaml == emitted_yaml, (yaml, emitted_yaml) + with expect_output(equals=emitted_yaml) as out: + with open(out, "w") as f: + print_yaml(input_data, file=f, end="") + +@command +def prancer(): + notice("Base prancer") + +@command +def vixen(): + prancer() + @test def plano_command(): - if PYTHON2: # pragma: nocover - raise PlanoTestSkipped("The plano command is not supported on Python 2") - with working_dir(): PlanoCommand().main([]) + PlanoCommand(_sys.modules[__name__]).main([]) + + PlanoCommand().main(["-m", "plano.test"]) + + with expect_system_exit(): + PlanoCommand().main(["-m", "nosuchmodule"]) + with working_dir(): - write("Planofile", "garbage") + write(".plano.py", "garbage") with expect_system_exit(): PlanoCommand().main([]) - with expect_system_exit(): - PlanoCommand("no-such-file").main([]) - with expect_system_exit(): PlanoCommand().main(["-f", "no-such-file"]) @@ -1055,15 +1244,6 @@ def run_command(*args): with test_project(): run_command() run_command("--help") - run_command("--quiet") - run_command("--init-only") - - 
run_command("build") - run_command("install") - run_command("clean") - - with expect_system_exit(): - run_command("build", "--help") with expect_system_exit(): run_command("no-such-command") @@ -1075,6 +1255,8 @@ def run_command(*args): run_command("--help", "no-such-command") run_command("extended-command", "a", "b", "--omega", "z") + run_command("extended-command", "a", "b", "--omega", "z", "--verbose") + run_command("extended-command", "a", "b", "--omega", "z", "--quiet") with expect_system_exit(): run_command("echo") @@ -1084,6 +1266,8 @@ def run_command(*args): run_command("echo", "Hello", "--count", "5") + run_command("echoecho", "Greetings") + with expect_system_exit(): run_command("echo", "Hello", "--count", "not-an-int") @@ -1111,27 +1295,48 @@ def run_command(*args): result = read_json("balderdash.json") assert result == ["bunk", "malarkey", "bollocks"], result -@test -def plano_shell_command(): - python_dir = get_absolute_path("python") + run_command("splasher,balderdash", "claptrap") + result = read_json("splasher.json") + assert result == [1], result + result = read_json("balderdash.json") + assert result == ["claptrap", "malarkey", "rubbish"], result - with working_dir(): - write("script1", "garbage") + with expect_system_exit(): + run_command("no-such-command,splasher") - with expect_exception(NameError): - PlanoShellCommand().main(["script1"]) + with expect_system_exit(): + run_command("splasher,no-such-command-nope") - write("script2", "print_env()") + run_command("dasher", "alpha", "--beta", "123") - PlanoShellCommand().main(["script2"]) + # Gamma is an unexpected arg + with expect_system_exit(): + run_command("dasher", "alpha", "--gamma", "123") - PlanoShellCommand().main(["--command", "print_env()"]) + # Args after "xyz" are extra passthrough args + run_command("dancer", "gamma", "--omega", "xyz", "extra1", "--extra2", "extra3") + result = read_json("dancer.json") + assert result == ["extra1", "--extra2", "extra3"], result - write("command", 
"from plano import *; PlanoShellCommand().main()") + # Ensure indirect calls (through parent commands) are specialized + run_command("vixen") + assert exists("prancer.json") - with working_env(PYTHONPATH=python_dir): - run("{0} command".format(_sys.executable), input="cprint('Hi!', color='green'); exit()") - run("echo \"cprint('Bi!', color='red')\" | {0} command -".format(_sys.executable), shell=True) + with expect_system_exit(): + run_command("no-parent") - with expect_system_exit(): - PlanoShellCommand().main(["no-such-file"]) + run_command("feta", "--spinach", "oregano") + result = read_json("feta.json") + assert result == "oregano" + + run_command("invisible") + result = read_json("invisible.json") + assert result == "nothing" + + + +def main(): + PlanoTestCommand(_sys.modules[__name__]).main() + +if __name__ == "__main__": # pragma: nocover + main() diff --git a/external/skewer/external/plano/src/plano/command.py b/external/skewer/external/plano/src/plano/command.py new file mode 100644 index 0000000..219f964 --- /dev/null +++ b/external/skewer/external/plano/src/plano/command.py @@ -0,0 +1,511 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +from .main import * + +import argparse as _argparse +import importlib as _importlib +import inspect as _inspect +import os as _os +import sys as _sys +import traceback as _traceback + +class BaseCommand: + def parse_args(self, args): # pragma: nocover + raise NotImplementedError() + + def configure_logging(self, args): + return "warning", None + + def init(self, args): # pragma: nocover + raise NotImplementedError() + + def run(self): # pragma: nocover + raise NotImplementedError() + + def main(self, args=None): + if args is None: + args = ARGS[1:] + + args = self.parse_args(args) + + assert isinstance(args, _argparse.Namespace), args + + level, output = self.configure_logging(args) + + with logging_enabled(level=level, output=output): + try: + self.init(args) + self.run() + except KeyboardInterrupt: + pass + except PlanoError as e: + if PLANO_DEBUG: # pragma: nocover + error(e) + else: + error(str(e)) + + exit(1) + +class BaseArgumentParser(_argparse.ArgumentParser): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + self.allow_abbrev = False + self.formatter_class = _argparse.RawDescriptionHelpFormatter + + _capitalize_help(self) + +_plano_command = None + +class PlanoCommand(BaseCommand): + def __init__(self, module=None, description="Run commands defined as Python functions", epilog=None): + self.module = module + self.bound_commands = dict() + self.running_commands = list() + self.passthrough_args = None + self.verbose = False + self.quiet = False + + assert self.module is None or _inspect.ismodule(self.module), self.module + + self.pre_parser = BaseArgumentParser(description=description, add_help=False) + self.pre_parser.add_argument("-h", "--help", action="store_true", + help="Show this help message and exit") + + if self.module is None: + self.pre_parser.add_argument("-f", "--file", help="Load commands from FILE (default '.plano.py')") + self.pre_parser.add_argument("-m", "--module", help="Load commands from MODULE") + + self.parser = 
_argparse.ArgumentParser(parents=(self.pre_parser,), + description=description, epilog=epilog, + add_help=False, allow_abbrev=False) + + # This is intentionally added after self.pre_parser is passed + # as parent to self.parser, since it is used only in the + # preliminary parsing. + self.pre_parser.add_argument("command", nargs="?", help=_argparse.SUPPRESS) + + global _plano_command + _plano_command = self + + def parse_args(self, args): + pre_args, _ = self.pre_parser.parse_known_args(args) + + if self.module is None: + if pre_args.module is None: + self.module = self._load_file(pre_args.file) + else: + self.module = self._load_module(pre_args.module) + + if self.module is not None: + self._bind_commands(self.module) + + self._process_commands() + + self.preceding_commands = list() + + if pre_args.command is not None and "," in pre_args.command: + names = pre_args.command.split(",") + + for name in names[:-1]: + try: + self.preceding_commands.append(self.bound_commands[name]) + except KeyError: + self.parser.error(f"Command '{name}' is unknown") + + args[args.index(pre_args.command)] = names[-1] + + args, self.passthrough_args = self.parser.parse_known_args(args) + + return args + + def configure_logging(self, args): + if args.command is not None and not self.bound_commands[args.command].passthrough: + if args.verbose: + return "debug", None + + if args.quiet: + return "warning", None + + return "notice", None + + def init(self, args): + self.help = args.help + + self.selected_command = None + self.command_args = list() + self.command_kwargs = dict() + + if args.command is not None: + for command in self.preceding_commands: + command() + + self.selected_command = self.bound_commands[args.command] + + if not self.selected_command.passthrough and self.passthrough_args: + self.parser.error(f"unrecognized arguments: {' '.join(self.passthrough_args)}") + + for param in self.selected_command.parameters.values(): + if param.name == "passthrough_args": + continue + + if 
param.positional: + if param.multiple: + self.command_args.extend(getattr(args, param.name)) + else: + self.command_args.append(getattr(args, param.name)) + else: + self.command_kwargs[param.name] = getattr(args, param.name) + + if self.selected_command.passthrough: + self.command_kwargs["passthrough_args"] = self.passthrough_args + + def run(self): + if self.help or self.module is None or self.selected_command is None: + self.parser.print_help() + return + + with Timer() as timer: + self.selected_command(*self.command_args, **self.command_kwargs) + + if not self.quiet: + cprint("OK", color="green", file=_sys.stderr, end="") + cprint(" ({})".format(format_duration(timer.elapsed_time)), color="magenta", file=_sys.stderr) + + def _load_module(self, name): + try: + return _importlib.import_module(name) + except ImportError: + exit("Module '{}' not found", name) + + def _load_file(self, path): + if path is not None and is_dir(path): + path = self._find_file(path) + + if path is not None and not is_file(path): + exit("File '{}' not found", path) + + if path is None: + path = self._find_file(get_current_dir()) + + if path is None: + return + + debug("Loading '{}'", path) + + _sys.path.insert(0, join(get_parent_dir(path), "python")) + + spec = _importlib.util.spec_from_file_location("_plano", path) + module = _importlib.util.module_from_spec(spec) + _sys.modules["_plano"] = module + + try: + spec.loader.exec_module(module) + except Exception as e: + error(e) + exit("Failure loading {}: {}", path, str(e)) + + return module + + def _find_file(self, dir): + # Planofile and .planofile remain temporarily for backward compatibility + for name in (".plano.py", "Planofile", ".planofile"): + path = join(dir, name) + + if is_file(path): + return path + + def _bind_commands(self, module): + for var in vars(module).values(): + if callable(var) and var.__class__.__name__ == "Command": + self.bound_commands[var.name] = var + + def _process_commands(self): + subparsers = 
self.parser.add_subparsers(title="commands", dest="command", metavar="{command}") + + for command in self.bound_commands.values(): + # This doesn't work yet, but in the future it might. + # https://bugs.python.org/issue22848 + # + # help = _argparse.SUPPRESS if command.hidden else command.help + + help = "[internal]" if command.hidden else command.help + add_help = False if command.passthrough else True + description = nvl(command.description, command.help) + + subparser = subparsers.add_parser(command.name, help=help, add_help=add_help, description=description, + formatter_class=_argparse.RawDescriptionHelpFormatter) + + if not command.passthrough: + subparser.add_argument("--verbose", action="store_true", + help="Print detailed logging to the console") + subparser.add_argument("--quiet", action="store_true", + help="Print no logging to the console") + + for param in command.parameters.values(): + if not command.passthrough and param.name in ("verbose", "quiet"): + continue + + if param.positional: + if param.multiple: + subparser.add_argument(param.name, metavar=param.metavar, type=param.type, help=param.help, + nargs="*") + elif param.optional: + subparser.add_argument(param.name, metavar=param.metavar, type=param.type, help=param.help, + nargs="?", default=param.default) + else: + subparser.add_argument(param.name, metavar=param.metavar, type=param.type, help=param.help) + else: + flag_args = list() + + if param.short_option is not None: + flag_args.append("-{}".format(param.short_option)) + + flag_args.append("--{}".format(param.display_name)) + + help = param.help + + if param.default not in (None, False): + if help is None: + help = "Default value is {}".format(repr(param.default)) + else: + help += " (default {})".format(repr(param.default)) + + if param.default is False: + subparser.add_argument(*flag_args, dest=param.name, default=param.default, action="store_true", + help=help) + else: + subparser.add_argument(*flag_args, dest=param.name, 
default=param.default, + metavar=param.metavar, type=param.type, help=help) + + _capitalize_help(subparser) + +_command_help = { + "build": "Build artifacts from source", + "clean": "Clean up the source tree", + "dist": "Generate distribution artifacts", + "install": "Install the built artifacts on your system", + "test": "Run the tests", + "coverage": "Run the tests and measure code coverage", +} + +def command(_function=None, name=None, parameters=None, parent=None, passthrough=False, hidden=False): + class Command: + def __init__(self, function): + self.function = function + self.module = _inspect.getmodule(self.function) + + self.name = name + self.parent = parent + + if self.parent is None: + # Strip leading and trailing underscores and convert + # remaining underscores to hyphens + default = self.function.__name__.strip("_").replace("_", "-") + + self.name = nvl(self.name, default) + self.parameters = self._process_parameters(parameters) + self.passthrough = passthrough + else: + assert parameters is None + + self.name = nvl(self.name, self.parent.name) + self.parameters = self.parent.parameters + self.passthrough = self.parent.passthrough + + doc = _inspect.getdoc(self.function) + + if doc is None: + self.help = _command_help.get(self.name) + self.description = self.help + else: + self.help = doc.split("\n")[0] + self.description = doc + + if self.parent is not None: + self.help = nvl(self.help, self.parent.help) + self.description = nvl(self.description, self.parent.description) + + self.hidden = hidden + + debug("Defining {}", self) + + for param in self.parameters.values(): + debug(" {}", str(param).capitalize()) + + def __repr__(self): + return "command '{}:{}'".format(self.module.__name__, self.name) + + def _process_parameters(self, cparams): + # CommandParameter objects from the @command decorator + cparams_in = {x.name: x for x in nvl(cparams, ())} + cparams_out = dict() + + # Parameter objects from the function signature + sig = 
_inspect.signature(self.function) + sparams = list(sig.parameters.values()) + + if len(sparams) == 2 and sparams[0].name == "args" and sparams[1].name == "kwargs": + # Don't try to derive command parameters from *args and **kwargs + return cparams_in + + for sparam in sparams: + try: + cparam = cparams_in[sparam.name] + except KeyError: + cparam = CommandParameter(sparam.name) + + if sparam.kind is sparam.POSITIONAL_ONLY: # pragma: nocover + if sparam.positional is None: + cparam.positional = True + elif sparam.kind is sparam.POSITIONAL_OR_KEYWORD and sparam.default is sparam.empty: + if cparam.positional is None: + cparam.positional = True + elif sparam.kind is sparam.POSITIONAL_OR_KEYWORD and sparam.default is not sparam.empty: + cparam.optional = True + cparam.default = sparam.default + elif sparam.kind is sparam.VAR_POSITIONAL: + if cparam.positional is None: + cparam.positional = True + cparam.multiple = True + elif sparam.kind is sparam.VAR_KEYWORD: + continue + elif sparam.kind is sparam.KEYWORD_ONLY: + cparam.optional = True + cparam.default = sparam.default + else: # pragma: nocover + raise NotImplementedError(sparam.kind) + + if cparam.type is None and cparam.default not in (None, False): # XXX why false? + cparam.type = type(cparam.default) + + cparams_out[cparam.name] = cparam + + return cparams_out + + def __call__(self, *args, **kwargs): + from .command import _plano_command, PlanoCommand + assert isinstance(_plano_command, PlanoCommand), _plano_command + + app = _plano_command + command = app.bound_commands[self.name] + + if command is not self: + # The command bound to this name has been overridden. + # This happens when a parent command invokes a peer + # command that is overridden. 
+ + command(*args, **kwargs) + + return + + debug("Running {} {} {}".format(self, args, kwargs)) + + app.running_commands.append(self) + + if not app.quiet: + dashes = "--- " * (len(app.running_commands) - 1) + display_args = list(self._get_display_args(args, kwargs)) + + with console_color("magenta", file=_sys.stderr): + eprint("{}--> {}".format(dashes, self.name), end="") + + if display_args: + eprint(" ({})".format(", ".join(display_args)), end="") + + eprint() + + self.function(*args, **kwargs) + + if not app.quiet: + cprint("{}<-- {}".format(dashes, self.name), color="magenta", file=_sys.stderr) + + app.running_commands.pop() + + def _get_display_args(self, args, kwargs): + for i, param in enumerate(self.parameters.values()): + if param.positional: + if param.multiple: + for va in args[i:]: + yield repr(va) + elif param.optional: + value = args[i] + + if value == param.default: + continue + + yield repr(value) + else: + yield repr(args[i]) + else: + value = kwargs.get(param.name, param.default) + + if value == param.default: + continue + + if value in (True, False): + value = str(value).lower() + else: + value = repr(value) + + yield "{}={}".format(param.display_name, value) + + if _function is None: + return Command + else: + return Command(_function) + +def parent(*args, **kwargs): + try: + f_locals = _inspect.stack()[2].frame.f_locals + parent_fn = f_locals["self"].parent.function + except: + fail("Missing parent command") + + parent_fn(*args, **kwargs) + +class CommandParameter: + def __init__(self, name, display_name=None, type=None, metavar=None, help=None, short_option=None, default=None, positional=None): + self.name = name + self.display_name = nvl(display_name, self.name.replace("_", "-")) + self.type = type + self.metavar = nvl(metavar, self.display_name.upper()) + self.help = help + self.short_option = short_option + self.default = default + self.positional = positional + + self.optional = False + self.multiple = False + + def __repr__(self): + 
return "parameter '{}' (default {})".format(self.name, repr(self.default)) + +# Patch the default help text +def _capitalize_help(parser): + try: + for action in parser._actions: + if action.help and action.help is not _argparse.SUPPRESS: + action.help = capitalize(action.help) + except: # pragma: nocover + pass + +def _main(): # pragma: nocover + PlanoCommand().main() diff --git a/external/skewer/external/plano/src/plano/github.py b/external/skewer/external/plano/src/plano/github.py new file mode 100644 index 0000000..e1714b5 --- /dev/null +++ b/external/skewer/external/plano/src/plano/github.py @@ -0,0 +1,80 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +from .main import * + +_html_template = """ + + + + + + +
+ +@content@ + +
+ + +""".strip() + +def convert_github_markdown(markdown): + json = emit_json({"text": markdown}) + content = http_post("https://api.github.com/markdown", json, content_type="application/json") + + # Remove the "user-content-" prefix from internal anchors + content = content.replace("id=\"user-content-", "id=\"") + + return _html_template.replace("@content@", content) + +def update_external_from_github(dir, owner, repo, ref="main"): + dir = get_absolute_path(dir) + make_parent_dir(dir) + + url = f"https://github.com/{owner}/{repo}/archive/{ref}.tar.gz" + + with temp_file() as temp: + assert exists(temp) + + http_get(url, output_file=temp) + + with working_dir(quiet=True): + extract_archive(temp) + + extracted_dir = list_dir()[0] + assert is_dir(extracted_dir) + + replace(dir, extracted_dir) diff --git a/external/skewer/external/plano/src/plano/main.py b/external/skewer/external/plano/src/plano/main.py new file mode 100644 index 0000000..eaed213 --- /dev/null +++ b/external/skewer/external/plano/src/plano/main.py @@ -0,0 +1,1788 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +import base64 as _base64 +import binascii as _binascii +import code as _code +import datetime as _datetime +import fnmatch as _fnmatch +import getpass as _getpass +import json as _json +import os as _os +import pprint as _pprint +import pkgutil as _pkgutil +import random as _random +import re as _re +import shlex as _shlex +import shutil as _shutil +import signal as _signal +import socket as _socket +import subprocess as _subprocess +import sys as _sys +import tempfile as _tempfile +import time as _time +import traceback as _traceback +import urllib as _urllib +import urllib.parse as _urllib_parse +import uuid as _uuid + +_max = max + +## Exceptions + +class PlanoException(Exception): + pass + +class PlanoError(PlanoException): + pass + +class PlanoTimeout(PlanoException): + pass + +## Global variables + +ENV = _os.environ +ARGS = _sys.argv + +STDIN = _sys.stdin +STDOUT = _sys.stdout +STDERR = _sys.stderr +DEVNULL = _os.devnull + +LINUX = _sys.platform == "linux" +WINDOWS = _sys.platform in ("win32", "cygwin") + +PLANO_DEBUG = "PLANO_DEBUG" in ENV +PLANO_COLOR = "PLANO_COLOR" in ENV + +## Archive operations + +def make_archive(input_dir, output_file=None, quiet=False): + check_program("tar") + + archive_stem = get_base_name(input_dir) + + if output_file is None: + # tar on Windows needs this + base = join(get_current_dir(), archive_stem) + base = base.replace("\\", "/") + + output_file = f"{base}.tar.gz" + + _notice(quiet, "Making archive {} from directory {}", repr(output_file), repr(input_dir)) + + with working_dir(get_parent_dir(input_dir), quiet=True): + run(f"tar -czf {output_file} {archive_stem}", quiet=True) + + return output_file + +def extract_archive(input_file, output_dir=None, quiet=False): + check_program("tar") + + if output_dir is None: + output_dir = get_current_dir() + + _notice(quiet, "Extracting archive {} to directory {}", repr(input_file), repr(output_dir)) + + input_file = get_absolute_path(input_file) + + # tar on Windows needs this + 
input_file = input_file.replace("\\", "/") + + with working_dir(output_dir, quiet=True): + run(f"tar -xf {input_file}", quiet=True) + + return output_dir + +def rename_archive(input_file, new_archive_stem, quiet=False): + _notice(quiet, "Renaming archive {} with stem {}", repr(input_file), repr(new_archive_stem)) + + output_dir = get_absolute_path(get_parent_dir(input_file)) + output_file = "{}.tar.gz".format(join(output_dir, new_archive_stem)) + + # tar on Windows needs this + output_file = output_file.replace("\\", "/") + + input_file = get_absolute_path(input_file) + + with working_dir(quiet=True): + extract_archive(input_file, quiet=True) + + input_name = list_dir()[0] + input_dir = move(input_name, new_archive_stem, quiet=True) + + make_archive(input_dir, output_file=output_file, quiet=True) + + remove(input_file, quiet=True) + + return output_file + +## Console operations + +def flush(): + _sys.stdout.flush() + _sys.stderr.flush() + +def eprint(*args, **kwargs): + print(*args, file=_sys.stderr, **kwargs) + +def pprint(*args, **kwargs): + args = [pformat(x) for x in args] + print(*args, **kwargs) + +_color_codes = { + "black": "\u001b[30", + "red": "\u001b[31", + "green": "\u001b[32", + "yellow": "\u001b[33", + "blue": "\u001b[34", + "magenta": "\u001b[35", + "cyan": "\u001b[36", + "white": "\u001b[37", + "gray": "\u001b[90", +} + +_color_reset = "\u001b[0m" + +def _get_color_code(color, bright): + elems = [_color_codes[color]] + + if bright: + elems.append(";1") + + elems.append("m") + + return "".join(elems) + +def _is_color_enabled(file): + return PLANO_COLOR or hasattr(file, "isatty") and file.isatty() + +class console_color: + def __init__(self, color=None, bright=False, file=_sys.stdout): + self.file = file + self.color_code = None + + if (color, bright) != (None, False): + self.color_code = _get_color_code(color, bright) + + self.enabled = self.color_code is not None and _is_color_enabled(self.file) + + def __enter__(self): + if self.enabled: + 
print(self.color_code, file=self.file, end="", flush=True) + + def __exit__(self, exc_type, exc_value, traceback): + if self.enabled: + print(_color_reset, file=self.file, end="", flush=True) + +def cformat(value, color=None, bright=False, file=_sys.stdout): + if (color, bright) != (None, False) and _is_color_enabled(file): + return "".join((_get_color_code(color, bright), value, _color_reset)) + else: + return value + +def cprint(*args, **kwargs): + color = kwargs.pop("color", "white") + bright = kwargs.pop("bright", False) + file = kwargs.get("file", _sys.stdout) + + with console_color(color, bright=bright, file=file): + print(*args, **kwargs) + +class output_redirected: + def __init__(self, output, quiet=False): + self.output = output + self.quiet = quiet + + def __enter__(self): + flush() + + _notice(self.quiet, "Redirecting output to file {}", repr(self.output)) + + if is_string(self.output): + output = open(self.output, "w") + + self.prev_stdout, self.prev_stderr = _sys.stdout, _sys.stderr + _sys.stdout, _sys.stderr = output, output + + def __exit__(self, exc_type, exc_value, traceback): + flush() + + _sys.stdout, _sys.stderr = self.prev_stdout, self.prev_stderr + +try: + breakpoint +except NameError: # pragma: nocover + def breakpoint(): + import pdb + pdb.set_trace() + +def repl(locals): # pragma: nocover + _code.InteractiveConsole(locals=locals).interact() + +def print_properties(props, file=None): + size = max([len(x[0]) for x in props]) + + for prop in props: + name = "{}:".format(prop[0]) + template = "{{:<{}}} ".format(size + 1) + + print(template.format(name), prop[1], end="", file=file) + + for value in prop[2:]: + print(" {}".format(value), end="", file=file) + + print(file=file) + +## Directory operations + +def find(dirs=None, include="*", exclude=[]): + if dirs is None: + dirs = "." 
+ + if is_string(dirs): + dirs = [dirs] + + if is_string(include): + include = [include] + + if is_string(exclude): + exclude = [exclude] + + found = set() + + for dir in dirs: + for root, dir_names, file_names in _os.walk(dir, followlinks=True): + names = dir_names + file_names + + for include_pattern in include: + names = _fnmatch.filter(names, include_pattern) + + for exclude_pattern in exclude: + for name in _fnmatch.filter(names, exclude_pattern): + names.remove(name) + + if root.startswith("./"): + root = remove_prefix(root, "./") + elif root == ".": + root = "" + + found.update([join(root, x) for x in names]) + + return sorted(found) + +def make_dir(dir, quiet=False): + if dir == "": + return dir + + if not exists(dir): + _notice(quiet, "Making directory '{}'", dir) + _os.makedirs(dir) + + return dir + +def make_parent_dir(path, quiet=False): + return make_dir(get_parent_dir(path), quiet=quiet) + +# Returns the current working directory so you can change it back +def change_dir(dir, quiet=False): + _debug(quiet, "Changing directory to {}", repr(dir)) + + prev_dir = get_current_dir() + + if not dir: + return prev_dir + + _os.chdir(dir) + + return prev_dir + +def list_dir(dir=None, include="*", exclude=[]): + if dir is None: + dir = get_current_dir() + else: + dir = expand(dir) + + assert is_dir(dir), dir + + if is_string(include): + include = [include] + + if is_string(exclude): + exclude = [exclude] + + names = _os.listdir(dir) + + for include_pattern in include: + names = _fnmatch.filter(names, include_pattern) + + for exclude_pattern in exclude: + for name in _fnmatch.filter(names, exclude_pattern): + names.remove(name) + + return sorted(names) + +def print_dir(dir=None, include="*", exclude=[]): + if dir is None: + dir = get_current_dir() + else: + dir = expand(dir) + + names = list_dir(dir=dir, include=include, exclude=exclude) + + print("{}:".format(get_absolute_path(dir))) + + if names: + for name in names: + print(f" {name}") + else: + print(" 
[none]") + +# No args constructor gets a temp dir +class working_dir: + def __init__(self, dir=None, quiet=False): + self.dir = dir + self.prev_dir = None + self.remove = False + self.quiet = quiet + + if self.dir is None: + self.dir = make_temp_dir() + self.remove = True + else: + self.dir = expand(self.dir) + + def __enter__(self): + if self.dir == ".": + return + + _notice(self.quiet, "Entering directory {}", repr(get_absolute_path(self.dir))) + + make_dir(self.dir, quiet=True) + + self.prev_dir = change_dir(self.dir, quiet=True) + + return self.dir + + def __exit__(self, exc_type, exc_value, traceback): + if self.dir == ".": + return + + _debug(self.quiet, "Returning to directory {}", repr(get_absolute_path(self.prev_dir))) + + change_dir(self.prev_dir, quiet=True) + + if self.remove: + remove(self.dir, quiet=True) + +## Environment operations + +def join_path_var(*paths): + return _os.pathsep.join(unique(skip(paths))) + +def get_current_dir(): + return _os.getcwd() + +def get_home_dir(user=None): + return _os.path.expanduser("~{}".format(user or "")) + +def get_user(): + return _getpass.getuser() + +def get_hostname(): + return _socket.gethostname() + +def get_program_name(command=None): + if command is None: + args = ARGS + else: + args = command.split() + + for arg in args: + if "=" not in arg: + return get_base_name(arg) + +def which(program_name): + return _shutil.which(program_name) + +def check_env(var, message=None): + if var not in _os.environ: + if message is None: + message = "Environment variable {} is not set".format(repr(var)) + + raise PlanoError(message) + +def check_module(module, message=None): + if _pkgutil.find_loader(module) is None: + if message is None: + message = "Python module {} is not found".format(repr(module)) + + raise PlanoError(message) + +def check_program(program, message=None): + if which(program) is None: + if message is None: + message = "Program {} is not found".format(repr(program)) + + raise PlanoError(message) + +class 
class working_env:
    """Context manager that temporarily sets environment variables.

    Keyword arguments become environment variables (values are stringified).
    Pass amend=False to also clear every variable not named in the
    arguments.  The previous environment is restored on exit.
    """

    def __init__(self, **vars):
        # "amend" is reserved as an option name, not an env var
        self.amend = vars.pop("amend", True)
        self.vars = vars

    def __enter__(self):
        self.prev_vars = dict(_os.environ)

        if not self.amend:
            for existing in list(_os.environ):
                if existing not in self.vars:
                    del _os.environ[existing]

        _os.environ.update({name: str(value) for name, value in self.vars.items()})

    def __exit__(self, exc_type, exc_value, traceback):
        # Restore the prior values, then drop any variables we introduced
        _os.environ.update(self.prev_vars)

        for name in self.vars:
            if name not in self.prev_vars:
                del _os.environ[name]
def replace(path, replacement, quiet=False):
    """Replace path with replacement, restoring the original on failure.

    Any existing file or directory at path is first moved to a temporary
    backup.  replacement is then moved into place.  If that move raises
    OSError, the partially-written path is removed and the backup is moved
    back, so the original content survives; the exception is re-raised.
    Returns path.
    """
    path = expand(path)
    replacement = expand(replacement)

    _notice(quiet, "Replacing {} with {}", repr(path), repr(replacement))

    with temp_dir() as backup_dir:
        backup = join(backup_dir, "backup")
        backup_created = False

        if exists(path):
            # Preserve the current content in case the replacement fails
            move(path, backup, quiet=True)
            backup_created = True

        try:
            move(replacement, path, quiet=True)
        except OSError:
            # NOTE(review): this log message looks truncated — confirm intent
            notice("Removing")
            remove(path, quiet=True)

            if backup_created:
                move(backup, path, quiet=True)

            raise

        # The replacement must be fully consumed and the target in place
        assert not exists(replacement), replacement
        assert exists(path), path

    return path
def unique(iterable):
    """Return a list of the items in iterable with duplicates removed,
    preserving first-seen order.  Items must be hashable."""
    seen = set()
    result = []

    for item in iterable:
        if item not in seen:
            seen.add(item)
            result.append(item)

    return result
def _run_curl(method, url, content=None, content_file=None, content_type=None, output_file=None, insecure=False,
              user=None, password=None, quiet=False):
    """Run curl for the given HTTP method and URL and return the response body.

    content sends a literal request body (piped on stdin); content_file
    sends a file's contents; the two are mutually exclusive.  output_file
    writes the response to a file instead of returning it.  Raises
    PlanoProcessError if curl exits with a nonzero status.
    """
    check_program("curl")

    _notice(quiet, f"Sending {method} request to '{url}'")

    args = ["curl", "-sfL"]

    if method != "GET":
        args.extend(["-X", method])

    if content is not None:
        assert content_file is None
        args.extend(["-H", "Expect:", "-d", "@-"])

    if content_file is not None:
        assert content is None, content
        args.extend(["-H", "Expect:", "-d", f"@{content_file}"])

    if content_type is not None:
        # Fix: args are passed to curl without a shell, so the previous
        # f"'Content-Type: {content_type}'" sent literal quote characters
        # as part of the header
        args.extend(["-H", f"Content-Type: {content_type}"])

    if output_file is not None:
        args.extend(["-o", output_file])

    if insecure:
        args.append("--insecure")

    if user is not None:
        assert password is not None
        args.extend(["--user", f"{user}:{password}"])

    args.append(url)

    if output_file is not None:
        make_parent_dir(output_file, quiet=True)

    proc = run(args, stdin=_subprocess.PIPE, stdout=_subprocess.PIPE, stderr=_subprocess.PIPE,
               input=content, check=False, quiet=True)

    # Fix: != 0 also catches termination by signal (negative exit codes),
    # which "> 0" silently ignored
    if proc.exit_code != 0:
        raise PlanoProcessError(proc)

    if output_file is None:
        return proc.stdout_result
def http_get(url, output_file=None, insecure=False, user=None, password=None, quiet=False):
    """GET url, returning the response body (or writing it to output_file)."""
    return _run_curl("GET", url, output_file=output_file, insecure=insecure, user=user, password=password, quiet=quiet)

def http_get_json(url, insecure=False, user=None, password=None, quiet=False):
    """GET url and parse the response body as JSON."""
    return parse_json(http_get(url, insecure=insecure, user=user, password=password, quiet=quiet))

def http_put(url, content, content_type=None, insecure=False, user=None, password=None, quiet=False):
    """PUT content to url.  Returns nothing."""
    _run_curl("PUT", url, content=content, content_type=content_type, insecure=insecure, user=user, password=password,
              quiet=quiet)

def http_put_file(url, content_file, content_type=None, insecure=False, user=None, password=None, quiet=False):
    """PUT the contents of content_file to url.  Returns nothing."""
    _run_curl("PUT", url, content_file=content_file, content_type=content_type, insecure=insecure, user=user,
              password=password, quiet=quiet)

def http_put_json(url, data, insecure=False, user=None, password=None, quiet=False):
    """PUT data serialized as JSON to url.  Returns nothing."""
    http_put(url, emit_json(data), content_type="application/json", insecure=insecure, user=user, password=password,
             quiet=quiet)

def http_post(url, content, content_type=None, output_file=None, insecure=False, user=None, password=None,
              quiet=False):
    """POST content to url, returning the response body (or writing it to output_file)."""
    return _run_curl("POST", url, content=content, content_type=content_type, output_file=output_file,
                     insecure=insecure, user=user, password=password, quiet=quiet)

def http_post_file(url, content_file, content_type=None, output_file=None, insecure=False, user=None, password=None,
                   quiet=False):
    """POST the contents of content_file to url, returning the response body (or writing it to output_file)."""
    return _run_curl("POST", url, content_file=content_file, content_type=content_type, output_file=output_file,
                     insecure=insecure, user=user, password=password, quiet=quiet)

def http_post_json(url, data, insecure=False, user=None, password=None, quiet=False):
    """POST data serialized as JSON to url and parse the JSON response."""
    return parse_json(http_post(url, emit_json(data), content_type="application/json", insecure=insecure, user=user,
                                password=password, quiet=quiet))
def enable_logging(level="notice", output=None, quiet=False):
    """Turn on logging at the given threshold level.

    level is one of "debug", "notice", "warning", "error" or "disabled".
    output may be a writable file object or a path string (the path is
    opened for writing); by default messages go to stderr.
    """
    assert level in _logging_levels, level

    _notice(quiet, "Enabling logging (level={}, output={})", repr(level), repr(nvl(output, "stderr")))

    global _logging_threshold, _logging_output

    _logging_threshold = _logging_levels.index(level)

    if is_string(output):
        output = open(output, "w")

    _logging_output = output
def fail(message, *args):
    """Raise a PlanoError.

    message may be a format string (formatted with args) or an exception
    instance to re-raise; non-Plano exceptions are logged at error level
    before being raised.
    """
    if isinstance(message, BaseException):
        if not isinstance(message, PlanoError):
            error(message)

        raise message

    raise PlanoError(message.format(*args) if args else message)
def expand(path):
    """Expand a leading '~' and any environment variable references in path."""
    return _os.path.expandvars(_os.path.expanduser(path))
def check_port(port, host="localhost"):
    """Raise PlanoError if nothing is accepting connections on host:port."""
    sock = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
    sock.setsockopt(_socket.SOL_SOCKET, _socket.SO_REUSEADDR, 1)

    # Fix: always close the socket — the previous version leaked one file
    # descriptor per call (this function is polled in a loop by await_port)
    try:
        if sock.connect_ex((host, port)) != 0:
            raise PlanoError("Port {} (host {}) is not reachable".format(repr(port), repr(host)))
    finally:
        sock.close()
# quiet=False - Don't log at notice level
# stash=False - No output unless there is an error
# output= - Send stdout and stderr to a file
# stdin= - File object or path string for the child's stdin (a path is opened "r")
# stdout= - Send stdout to a file
# stderr= - Send stderr to a file
# shell=False - Run the command through the shell (the command is joined into one string)
def start(command, stdin=None, stdout=None, stderr=None, output=None, shell=False, stash=False, quiet=False):
    """Start command as a child process and return a PlanoProcess handle.

    command is either a string (split with shlex unless shell=True) or a
    list of arguments; without shell=True every argument is stringified
    and has '~' and environment variables expanded.  Raises PlanoError if
    the executable cannot be started.
    """
    _notice(quiet, "Starting a new process (command {})", _format_command(command))

    if output is not None:
        stdout, stderr = output, output

    # Path strings given for the standard streams are opened as files
    if is_string(stdin):
        stdin = expand(stdin)
        stdin = open(stdin, "r")

    if is_string(stdout):
        stdout = expand(stdout)
        stdout = open(stdout, "w")

    if is_string(stderr):
        stderr = expand(stderr)
        stderr = open(stderr, "w")

    if stdin is None:
        stdin = _sys.stdin

    if stdout is None:
        stdout = _sys.stdout

    if stderr is None:
        stderr = _sys.stderr

    stash_file = None

    if stash:
        # Capture all output to a temp file; wait() prints it only on failure
        stash_file = make_temp_file()
        out = open(stash_file, "w")
        stdout = out
        stderr = out

    if shell:
        # The shell receives a single command string
        if is_string(command):
            args = command
        else:
            args = " ".join(map(str, command))
    else:
        if is_string(command):
            args = _shlex.split(command)
        else:
            args = command

        # NOTE(review): with shell=True the command is not expanded — confirm
        # whether that asymmetry is intended
        args = [expand(str(x)) for x in args]

    try:
        proc = PlanoProcess(args, stdin=stdin, stdout=stdout, stderr=stderr, shell=shell, close_fds=True, stash_file=stash_file)
    except OSError as e:
        raise PlanoError("Command {}: {}".format(_format_command(command), str(e)))

    _notice(quiet, "{} started", proc)

    return proc
def wait(proc, timeout=None, check=False, quiet=False):
    """Wait for proc to exit and return it.

    Raises PlanoTimeout if timeout seconds elapse first.  With check=True
    a positive exit code raises PlanoProcessError; termination by signal
    (a negative exit code) is only logged at debug level.  If the process
    was started with stash=True, the captured output is printed to stderr
    when the process fails.
    """
    _notice(quiet, "Waiting for {} to exit", proc)

    try:
        proc.wait(timeout=timeout)
    except _subprocess.TimeoutExpired:
        error("{} timed out after {} seconds", proc, timeout)
        raise PlanoTimeout()

    if proc.exit_code == 0:
        debug("{} exited normally", proc)
    elif proc.exit_code < 0:
        # Negative return codes from Popen mean death by signal
        debug("{} was terminated by signal {}", proc, abs(proc.exit_code))
    else:
        if check:
            error("{} exited with code {}", proc, proc.exit_code)
        else:
            debug("{} exited with code {}", proc, proc.exit_code)

    if proc.stash_file is not None:
        if proc.exit_code > 0:
            eprint(read(proc.stash_file), end="")

        if not WINDOWS:
            # presumably skipped on Windows because the file may still be
            # held open — TODO confirm
            remove(proc.stash_file, quiet=True)

    if check and proc.exit_code > 0:
        raise PlanoProcessError(proc)

    return proc
def exit(arg=None, *args, **kwargs):
    """Exit the process.

    arg may be None or 0 (normal exit), a message format string
    (formatted with args, printed and used as the exit status), an
    exception, or an integer exit code.  Pass verbose=True to log the
    reason before exiting.  Raises PlanoException for any other argument.
    """
    verbose = kwargs.get("verbose", False)

    if arg in (0, None):
        if verbose:
            notice("Exiting normally")

        _sys.exit()

    if is_string(arg):
        message = arg.format(*args) if args else arg

        if verbose:
            error(message)

        _sys.exit(message)

    if isinstance(arg, BaseException):
        if verbose:
            error(arg)

        _sys.exit(str(arg))

    if isinstance(arg, int):
        _sys.exit(arg)

    raise PlanoException("Illegal argument")
def shorten(string, max, ellipsis=None):
    """Return string truncated to at most max characters.

    If ellipsis is given, it is appended to the truncated text with the
    total length still capped at max (when max is shorter than the
    ellipsis itself, only the ellipsis is returned).  None input yields
    ""; max=None means no limit.
    """
    assert max is None or isinstance(max, int)

    if string is None:
        return ""

    # NOTE(review): a string of exactly max characters is still truncated
    # when an ellipsis is given ("<" rather than "<=") — confirm intent
    if max is None or len(string) < max:
        return string

    if ellipsis is None:
        return string[0:max]

    # Leave room for the ellipsis inside the max-length budget.
    # (The previous version also appended the ellipsis to the input before
    # slicing, which was dead code, and depended on a _max helper.)
    end = max - len(ellipsis)

    if end < 0:
        end = 0

    return string[0:end] + ellipsis
class temp_file:
    """Context manager yielding the path of a newly created temporary file.

    The file is created up front with mkstemp and its descriptor is held
    until exit, when the descriptor is closed and the file removed
    (removal is skipped on Windows — see the XXX note in the code).
    """
    def __init__(self, prefix="plano-", suffix="", dir=None):
        if dir is None:
            dir = get_system_temp_dir()

        # Keep the descriptor so it can be closed on exit
        self.fd, self.file = _tempfile.mkstemp(prefix=prefix, suffix=suffix, dir=dir)

    def __enter__(self):
        return self.file

    def __exit__(self, exc_type, exc_value, traceback):
        _os.close(self.fd)

        if not WINDOWS: # XXX
            remove(self.file, quiet=True)
def format_duration(seconds, align=False):
    """Format a non-negative duration in seconds as a short string, e.g.
    "2h", "30m", "1.5s".  With align=True, one decimal place is always
    shown so columns of durations line up."""
    assert seconds >= 0

    if seconds >= 3600:
        value, unit = seconds / 3600, "h"
    elif seconds >= 300:
        value, unit = seconds / 60, "m"
    else:
        value, unit = seconds, "s"

    if align:
        return "{:.1f}{}".format(value, unit)

    if value > 10:
        return "{:.0f}{}".format(value, unit)

    text = "{:.1f}".format(value)

    if text.endswith(".0"):
        text = text[:-2]

    return text + unit
class Namespace:
    """Simple attribute bag: keyword arguments become attributes.

    Instances compare equal when their attribute dicts are equal;
    `name in ns` tests for an attribute.
    """
    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            setattr(self, name, value)

    def __eq__(self, other):
        # Fix: comparing against a non-Namespace (e.g. an int) raised
        # TypeError from vars(); defer to the other operand instead
        if not isinstance(other, Namespace):
            return NotImplemented

        return vars(self) == vars(other)

    def __contains__(self, key):
        return key in self.__dict__

    def __repr__(self):
        return format_repr(self)
To install it, run 'pip install pyyaml'.") + + import yaml as _yaml + + return _yaml.safe_dump(data) + +def print_yaml(data, **kwargs): + print(emit_yaml(data), **kwargs) + +if PLANO_DEBUG: # pragma: nocover + enable_logging(level="debug") diff --git a/external/skewer/external/plano/src/plano/test.py b/external/skewer/external/plano/src/plano/test.py new file mode 100644 index 0000000..fb87d8d --- /dev/null +++ b/external/skewer/external/plano/src/plano/test.py @@ -0,0 +1,428 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
from .main import *
from .command import *

import argparse as _argparse
import asyncio as _asyncio
import fnmatch as _fnmatch
import functools as _functools
import importlib as _importlib
import inspect as _inspect
import sys as _sys
import traceback as _traceback

class PlanoTestCommand(BaseCommand):
    """Command-line entry point for discovering and running plano tests."""

    def __init__(self, test_modules=None):
        # Fixed: the previous mutable default ([]) was shared across instances,
        # and init() appends imported modules to it.
        self.test_modules = [] if test_modules is None else test_modules

        if _inspect.ismodule(self.test_modules):
            self.test_modules = [self.test_modules]

        self.parser = BaseArgumentParser()
        self.parser.add_argument("include", metavar="PATTERN", nargs="*", default=["*"],
                                 help="Run tests with names matching PATTERN (default '*', all tests)")
        self.parser.add_argument("-e", "--exclude", metavar="PATTERN", action="append", default=[],
                                 help="Do not run tests with names matching PATTERN (repeatable)")
        self.parser.add_argument("-m", "--module", action="append", default=[],
                                 help="Collect tests from MODULE (repeatable)")
        self.parser.add_argument("-l", "--list", action="store_true",
                                 help="Print the test names and exit")
        self.parser.add_argument("--enable", metavar="PATTERN", action="append", default=[],
                                 help=_argparse.SUPPRESS)
        self.parser.add_argument("--unskip", metavar="PATTERN", action="append", default=[],
                                 help="Run skipped tests matching PATTERN (repeatable)")
        self.parser.add_argument("--timeout", metavar="SECONDS", type=int, default=300,
                                 help="Fail any test running longer than SECONDS (default 300)")
        self.parser.add_argument("--fail-fast", action="store_true",
                                 help="Exit on the first failure encountered in a test run")
        self.parser.add_argument("--iterations", metavar="COUNT", type=int, default=1,
                                 help="Run the tests COUNT times (default 1)")
        self.parser.add_argument("--verbose", action="store_true",
                                 help="Print detailed logging to the console")
        self.parser.add_argument("--quiet", action="store_true",
                                 help="Print no logging to the console")

    def parse_args(self, args):
        return self.parser.parse_args(args)

    def configure_logging(self, args):
        """Map --verbose/--quiet to a console log level for BaseCommand."""
        if args.verbose:
            return "notice", None

        if args.quiet:
            return "error", None

        return "warning", None

    def init(self, args):
        """Copy parsed options onto self and import any requested test modules."""
        self.list_only = args.list
        self.include_patterns = args.include
        self.exclude_patterns = args.exclude
        self.enable_patterns = args.enable
        self.unskip_patterns = args.unskip
        self.timeout = args.timeout
        self.fail_fast = args.fail_fast
        self.iterations = args.iterations
        self.verbose = args.verbose
        self.quiet = args.quiet

        try:
            for name in args.module:
                self.test_modules.append(_importlib.import_module(name))
        except ImportError as e:
            # Surface import problems as a plano error so the CLI reports them cleanly
            raise PlanoError(e)

    def run(self):
        if self.list_only:
            print_tests(self.test_modules)
            return

        for i in range(self.iterations):
            run_tests(self.test_modules, include=self.include_patterns,
                      exclude=self.exclude_patterns,
                      enable=self.enable_patterns, unskip=self.unskip_patterns,
                      test_timeout=self.timeout, fail_fast=self.fail_fast,
                      verbose=self.verbose, quiet=self.quiet)

class PlanoTestSkipped(Exception):
    """Raised by skip_test() to mark a test as skipped rather than failed."""
    pass

def test(_function=None, name=None, module=None, timeout=None, disabled=False):
    """Decorator that registers a function as a plano test.

    Usable bare (@test) or with keyword options (@test(name=..., timeout=...)).
    Registered tests are collected on the defining module's _plano_tests list.
    """
    class Test:
        def __init__(self, function):
            self.function = function
            self.name = name
            self.module = module
            self.timeout = timeout
            self.disabled = disabled

            if self.name is None:
                # Derive a test name from the function name: drop sentinel
                # underscores, use dashes between words
                self.name = self.function.__name__.strip("_").replace("_", "-")

            if self.module is None:
                self.module = _inspect.getmodule(self.function)

            if not hasattr(self.module, "_plano_tests"):
                self.module._plano_tests = list()

            self.module._plano_tests.append(self)

        def __call__(self, test_run, unskipped):
            try:
                ret = self.function()

                # Support async test functions transparently
                if _inspect.iscoroutine(ret):
                    _asyncio.run(ret)
            except SystemExit as e:
                # A test must not terminate the runner; convert to a failure
                error(e)
                raise PlanoError("System exit with code {}".format(e))

        def __repr__(self):
            return "test '{}:{}'".format(self.module.__name__, self.name)

    if _function is None:
        return Test
    else:
        return Test(_function)

def add_test(name, func, *args, **kwargs):
    """Register func(*args, **kwargs) as a named test on func's module."""
    test(_functools.partial(func, *args, **kwargs), name=name, module=_inspect.getmodule(func))

def skip_test(reason=None):
    """Skip the current test unless it was unskipped on the command line.

    NOTE(review): relies on stack()[2] being the Test.__call__ frame, which
    exposes the 'unskipped' local — confirm if call depth ever changes.
    """
    if _inspect.stack()[2].frame.f_locals["unskipped"]:
        return

    raise PlanoTestSkipped(reason)

class expect_exception:
    """Context manager asserting that the body raises exception_type.

    Optionally requires 'contains' to appear in the exception text.
    """
    def __init__(self, exception_type=Exception, contains=None):
        self.exception_type = exception_type
        self.contains = contains

    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_value, traceback):
        if exc_value is None:
            assert False, "Never encountered expected exception {}".format(self.exception_type.__name__)

        if self.contains is None:
            return isinstance(exc_value, self.exception_type)
        else:
            return isinstance(exc_value, self.exception_type) and self.contains in str(exc_value)

class expect_error(expect_exception):
    """Expect a PlanoError from the body."""
    def __init__(self, contains=None):
        super().__init__(PlanoError, contains=contains)

class expect_timeout(expect_exception):
    """Expect a PlanoTimeout from the body."""
    def __init__(self, contains=None):
        super().__init__(PlanoTimeout, contains=contains)

class expect_system_exit(expect_exception):
    """Expect a SystemExit from the body."""
    def __init__(self, contains=None):
        super().__init__(SystemExit, contains=contains)

class expect_output(temp_file):
    """Temp-file context manager asserting on the file's final contents.

    With no options, asserts the output is non-empty.
    """
    def __init__(self, equals=None, contains=None, startswith=None, endswith=None):
        super().__init__()
        self.equals = equals
        self.contains = contains
        self.startswith = startswith
        self.endswith = endswith

    def __exit__(self, exc_type, exc_value, traceback):
        result = read(self.file)

        if self.equals is None:
            assert len(result) > 0, result
        else:
            assert result == self.equals, result

        if self.contains is not None:
            assert self.contains in result, result

        if self.startswith is not None:
            assert result.startswith(self.startswith), result

        if self.endswith is not None:
            assert result.endswith(self.endswith), result

        super().__exit__(exc_type, exc_value, traceback)
def print_tests(modules):
    """Print the registered tests in the given module or modules."""
    if _inspect.ismodule(modules):
        modules = (modules,)

    for module in modules:
        for test in module._plano_tests:
            flags = "(disabled)" if test.disabled else ""
            print(" ".join((str(test), flags)).strip())

def run_tests(modules, include="*", exclude=(), enable=(), unskip=(), test_timeout=300,
              fail_fast=False, verbose=False, quiet=False):
    """Run the tests collected on the given modules.

    include/exclude/enable/unskip are fnmatch patterns (a single string is
    accepted for each).  Raises PlanoError if no tests ran or any test failed.
    """
    if _inspect.ismodule(modules):
        modules = (modules,)

    if is_string(include):
        include = (include,)

    if is_string(exclude):
        exclude = (exclude,)

    if is_string(enable):
        enable = (enable,)

    if is_string(unskip):
        # Fixed: this previously assigned to 'enable', clobbering the enable
        # patterns and leaving a string 'unskip' unnormalized
        unskip = (unskip,)

    test_run = TestRun(test_timeout=test_timeout, fail_fast=fail_fast, verbose=verbose, quiet=quiet)

    if verbose:
        notice("Starting {}", test_run)
    elif not quiet:
        cprint("=== Configuration ===", color="cyan")

        props = (
            ("Modules", format_empty(", ".join([x.__name__ for x in modules]), "[none]")),
            ("Test timeout", format_duration(test_timeout)),
            ("Fail fast", fail_fast),
        )

        print_properties(props)
        print()

    stop = False

    for module in modules:
        if stop:
            break

        if verbose:
            notice("Running tests from module {} (file {})", repr(module.__name__), repr(module.__file__))
        elif not quiet:
            cprint("=== Module {} ===".format(repr(module.__name__)), color="cyan")

        if not hasattr(module, "_plano_tests"):
            warning("Module {} has no tests", repr(module.__name__))
            continue

        for test in module._plano_tests:
            if stop:
                break

            # Disabled tests run only when explicitly enabled by pattern
            if test.disabled and not any([_fnmatch.fnmatchcase(test.name, x) for x in enable]):
                continue

            included = any([_fnmatch.fnmatchcase(test.name, x) for x in include])
            excluded = any([_fnmatch.fnmatchcase(test.name, x) for x in exclude])
            unskipped = any([_fnmatch.fnmatchcase(test.name, x) for x in unskip])

            if included and not excluded:
                test_run.tests.append(test)
                stop = _run_test(test_run, test, unskipped)

        if not verbose and not quiet:
            print()

    total = len(test_run.tests)
    skipped = len(test_run.skipped_tests)
    failed = len(test_run.failed_tests)

    if total == 0:
        raise PlanoError("No tests ran")

    notes = ""

    if skipped != 0:
        notes = "({} skipped)".format(skipped)

    if failed == 0:
        result_message = "All tests passed {}".format(notes).strip()
    else:
        result_message = "{} {} failed {}".format(failed, plural("test", failed), notes).strip()

    if verbose:
        if failed == 0:
            notice(result_message)
        else:
            error(result_message)
    elif not quiet:
        cprint("=== Summary ===", color="cyan")

        props = (
            ("Total", total),
            ("Skipped", skipped, format_not_empty(", ".join([x.name for x in test_run.skipped_tests]), "({})")),
            ("Failed", failed, format_not_empty(", ".join([x.name for x in test_run.failed_tests]), "({})")),
        )

        print_properties(props)
        print()

        cprint("=== RESULT ===", color="cyan")

        if failed == 0:
            cprint(result_message, color="green")
        else:
            # Fixed: bright was passed as the string "True" instead of a bool,
            # unlike every other cprint/bright call site
            cprint(result_message, color="red", bright=True)

        print()

    if failed != 0:
        raise PlanoError(result_message)

def _run_test(test_run, test, unskipped):
    """Run one test, recording the outcome on test_run.

    Returns True when a failure should stop the run (fail-fast).
    """
    if test_run.verbose:
        notice("Running {}", test)
    elif not test_run.quiet:
        print("{:.<65} ".format(test.name + " "), end="")

    timeout = nvl(test.timeout, test_run.test_timeout)

    with temp_file() as output_file:
        try:
            with Timer(timeout=timeout) as timer:
                if test_run.verbose:
                    test(test_run, unskipped)
                else:
                    # Capture output so it is shown only when the test fails
                    with output_redirected(output_file, quiet=True):
                        test(test_run, unskipped)
        except KeyboardInterrupt:
            raise
        except PlanoTestSkipped as e:
            test_run.skipped_tests.append(test)

            if test_run.verbose:
                notice("{} SKIPPED ({})", test, format_duration(timer.elapsed_time))
            elif not test_run.quiet:
                _print_test_result("SKIPPED", timer, "yellow")
                print("Reason: {}".format(str(e)))
        except Exception as e:
            test_run.failed_tests.append(test)

            if test_run.verbose:
                _traceback.print_exc()

                if isinstance(e, PlanoTimeout):
                    error("{} **FAILED** (TIMEOUT) ({})", test, format_duration(timer.elapsed_time))
                else:
                    error("{} **FAILED** ({})", test, format_duration(timer.elapsed_time))
            elif not test_run.quiet:
                if isinstance(e, PlanoTimeout):
                    _print_test_result("**FAILED** (TIMEOUT)", timer, color="red", bright=True)
                else:
                    _print_test_result("**FAILED**", timer, color="red", bright=True)

                _print_test_error(e)
                _print_test_output(output_file)

            if test_run.fail_fast:
                return True
        else:
            test_run.passed_tests.append(test)

            if test_run.verbose:
                notice("{} PASSED ({})", test, format_duration(timer.elapsed_time))
            elif not test_run.quiet:
                _print_test_result("PASSED", timer)

def _print_test_result(status, timer, color="white", bright=False):
    # One status column plus a right-aligned duration
    cprint("{:<7}".format(status), color=color, bright=bright, end="")
    print("{:>6}".format(format_duration(timer.elapsed_time, align=True)))

def _print_test_error(e):
    cprint("--- Error ---", color="yellow")

    if isinstance(e, PlanoProcessError):
        print("> {}".format(str(e)))
    else:
        lines = _traceback.format_exc().rstrip().split("\n")
        lines = ["> {}".format(x) for x in lines]

        print("\n".join(lines))

def _print_test_output(output_file):
    if get_file_size(output_file) == 0:
        return

    cprint("--- Output ---", color="yellow")

    with open(output_file, "r") as out:
        for line in out:
            print("> {}".format(line), end="")

class TestRun:
    """Mutable record of one test run: configuration plus outcome lists."""

    def __init__(self, test_timeout=None, fail_fast=False, verbose=False, quiet=False):
        self.test_timeout = test_timeout
        self.fail_fast = fail_fast
        self.verbose = verbose
        self.quiet = quiet

        self.tests = list()
        self.skipped_tests = list()
        self.failed_tests = list()
        self.passed_tests = list()

    def __repr__(self):
        return format_repr(self)

def _main(): # pragma: nocover
    PlanoTestCommand().main()
#!/usr/bin/python3
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#

import sys

# Put the bundled 'python' directory first on the path so the
# vendored plano module is used
sys.path.insert(0, "python")

from plano import PlanoCommand

if __name__ == "__main__":
    PlanoCommand().main()
See the License for the +# specific language governing permissions and limitations +# under the License. +# + +from .main import * diff --git a/external/skewer/python/skewer/main.py b/external/skewer/python/skewer/main.py new file mode 100644 index 0000000..8db876f --- /dev/null +++ b/external/skewer/python/skewer/main.py @@ -0,0 +1,731 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
import inspect

from plano import *

__all__ = [
    "generate_readme", "run_steps", "Minikube",
]

# Shared boilerplate text and reusable step definitions bundled with the package
standard_text = read_yaml(join(get_parent_dir(__file__), "standardtext.yaml"))
standard_steps = read_yaml(join(get_parent_dir(__file__), "standardsteps.yaml"))

def check_environment():
    """Fail early if any tool required to run the steps is missing."""
    check_program("base64")
    check_program("curl")
    check_program("kubectl")
    check_program("skupper")

def resource_exists(resource):
    """Return True when 'kubectl get <resource>' succeeds in the current context."""
    return run(f"kubectl get {resource}", output=DEVNULL, check=False, quiet=True).exit_code == 0

def get_resource_json(resource, jsonpath=""):
    """Return the jsonpath-selected portion of a resource as a JSON string."""
    return call(f"kubectl get {resource} -o jsonpath='{{{jsonpath}}}'", quiet=True)

def await_resource(resource, timeout=300):
    """Poll until 'resource' (e.g. 'deployment/foo') exists, then, for
    deployments, until it reports available.

    Raises via fail() after 'timeout' seconds.
    """
    assert "/" in resource, resource

    start_time = get_time()

    while True:
        notice(f"Waiting for {resource} to become available")

        if resource_exists(resource):
            break

        if get_time() - start_time > timeout:
            fail(f"Timed out waiting for {resource}")

        sleep(5, quiet=True)

    if resource.startswith("deployment/"):
        try:
            run(f"kubectl wait --for condition=available --timeout {timeout}s {resource}", quiet=True, stash=True)
        except:
            # Best-effort: dump the deployment logs for diagnosis, then re-raise
            run(f"kubectl logs {resource}")
            raise

def await_ingress(service, timeout=300):
    """Poll until 'service' has load-balancer ingress; return its hostname or IP."""
    assert service.startswith("service/"), service

    start_time = get_time()

    await_resource(service, timeout=timeout)

    while True:
        notice(f"Waiting for hostname or IP from {service} to become available")

        json = get_resource_json(service, ".status.loadBalancer.ingress")

        if json != "":
            break

        if get_time() - start_time > timeout:
            # Fixed typo: "hostnmae" -> "hostname"
            fail(f"Timed out waiting for hostname or external IP for {service}")

        sleep(5, quiet=True)

    data = parse_json(json)

    if len(data):
        if "hostname" in data[0]:
            return data[0]["hostname"]

        if "ip" in data[0]:
            return data[0]["ip"]

    fail(f"Failed to get hostname or IP from {service}")

def await_http_ok(service, url_template, user=None, password=None, timeout=300):
    """Poll until an HTTP GET of url_template (formatted with the service's
    ingress address) succeeds."""
    assert service.startswith("service/"), service

    start_time = get_time()

    ip = await_ingress(service, timeout=timeout)

    url = url_template.format(ip)
    insecure = url.startswith("https")

    while True:
        notice(f"Waiting for HTTP OK from {url}")

        try:
            http_get(url, insecure=insecure, user=user, password=password, quiet=True)
        except PlanoError:
            if get_time() - start_time > timeout:
                fail(f"Timed out waiting for HTTP OK from {url}")

            sleep(5, quiet=True)
        else:
            break

def await_console_ok():
    """Wait for the Skupper console to answer with the admin credentials."""
    await_resource("secret/skupper-console-users")

    password = get_resource_json("secret/skupper-console-users", ".data.admin")
    # NOTE(review): base64_decode appears to return bytes (pause_for_demo calls
    # .decode("ascii") on it) — confirm http_get accepts a bytes password
    password = base64_decode(password)

    await_http_ok("service/skupper", "https://{}:8010/", user="admin", password=password)

def run_steps(skewer_file, kubeconfigs=[], work_dir=None, debug=False):
    """Execute every step in the skewer file, always running the
    'cleaning_up' step last (even on failure)."""
    notice(f"Running steps (skewer_file='{skewer_file}')")

    check_environment()

    model = Model(skewer_file, kubeconfigs)
    model.check()

    if work_dir is None:
        work_dir = join(get_user_temp_dir(), "skewer")
        remove(work_dir, quiet=True)
        make_dir(work_dir, quiet=True)

    try:
        for step in model.steps:
            if step.name == "cleaning_up":
                continue

            run_step(model, step, work_dir)

        if "SKEWER_DEMO" in ENV:
            pause_for_demo(model)
    except:
        if debug:
            print_debug_output(model)

        raise
    finally:
        for step in model.steps:
            if step.name == "cleaning_up":
                run_step(model, step, work_dir, check=False)
                break

def run_step(model, step, work_dir, check=True):
    """Run one step's commands, site by site, honoring await_* directives.

    Commands marked apply=readme are documentation-only and skipped here.
    """
    if not step.commands:
        return

    notice(f"Running {step}")

    for site_name, commands in step.commands:
        with dict(model.sites)[site_name] as site:
            if site.platform == "kubernetes":
                run(f"kubectl config set-context --current --namespace {site.namespace}", stdout=DEVNULL, quiet=True)

            for command in commands:
                if command.apply == "readme":
                    continue

                if command.await_resource:
                    await_resource(command.await_resource)

                if command.await_ingress:
                    await_ingress(command.await_ingress)

                if command.await_http_ok:
                    await_http_ok(*command.await_http_ok)

                if command.await_console_ok:
                    await_console_ok()

                if command.run:
                    # '~' in commands is a placeholder for the working directory
                    run(command.run.replace("~", work_dir), shell=True, check=check)
def pause_for_demo(model):
    """Print connection details for the running example and wait for the user."""
    notice("Pausing for demo time")

    first_site = [x for _, x in model.sites][0]
    console_url = None
    password = None
    frontend_url = None

    if first_site.platform == "kubernetes":
        with first_site:
            if resource_exists("service/frontend") \
                    and get_resource_json("service/frontend", ".spec.type") == "LoadBalancer":
                host = await_ingress("service/frontend")
                frontend_url = f"http://{host}:8080/"

            if resource_exists("secret/skupper-console-users"):
                host = await_ingress("service/skupper")
                console_url = f"https://{host}:8010/"

                await_resource("secret/skupper-console-users")
                password = get_resource_json("secret/skupper-console-users", ".data.admin")
                password = base64_decode(password).decode("ascii")

    print()
    print("Demo time!")
    print()
    print("Sites:")
    print()

    for _, site in model.sites:
        if site.platform == "kubernetes":
            kubeconfig = site.env["KUBECONFIG"]
            print(f"  {site.name}: export KUBECONFIG={kubeconfig}")
        elif site.platform == "podman":
            print(f"  {site.name}: export SKUPPER_PLATFORM=podman")

    print()

    if frontend_url:
        print(f"Frontend URL: {frontend_url}")
        print()

    if console_url:
        print(f"Console URL: {console_url}")
        print("Console user: admin")
        print(f"Console password: {password}")
        print()

    if "SKEWER_DEMO_NO_WAIT" not in ENV:
        while input("Are you done (yes)? ") != "yes": # pragma: nocover
            pass

def print_debug_output(model):
    """Dump Kubernetes and Skupper state for every site after a failure."""
    print("TROUBLE!")
    print("-- Start of debug output")

    for _, site in model.sites:
        print(f"---- Debug output for site '{site.name}'")

        with site:
            if site.platform == "kubernetes":
                for kind in ("services", "deployments", "statefulsets", "pods", "events"):
                    run(f"kubectl get {kind}", check=False)

            for subcommand in ("version", "status", "link status", "service status",
                               "network status", "debug events"):
                run(f"skupper {subcommand}", check=False)

            if site.platform == "kubernetes":
                run("kubectl logs deployment/skupper-router", check=False)
                run("kubectl logs deployment/skupper-service-controller", check=False)

    print("-- End of debug output")

def generate_readme(skewer_file, output_file):
    """Render the skewer file into a full README markdown document."""
    notice(f"Generating the readme (skewer_file='{skewer_file}', output_file='{output_file}')")

    model = Model(skewer_file)
    model.check()

    lines = list()

    def workflow_url(workflow):
        # A bare workflow file name is resolved against this repo's GitHub actions
        result = parse_url(workflow)

        if result.scheme:
            return workflow

        owner, repo = get_github_owner_repo()

        return f"https://github.com/{owner}/{repo}/actions/workflows/{workflow}"

    def step_heading(step):
        return f"Step {step.number}: {step.title}" if step.numbered else step.title

    def add_toc_entry(heading, condition=True):
        if not condition:
            return

        # Derive the GitHub anchor fragment from the heading text
        fragment = string_replace(heading, r"[ -]", "_")
        fragment = string_replace(fragment, r"[\W]", "")
        fragment = string_replace(fragment, "_", "-")
        fragment = fragment.lower()

        lines.append(f"* [{heading}](#{fragment})")

    def add_section(heading, text):
        if not text:
            return

        lines.append(f"## {heading}")
        lines.append("")
        lines.append(text.strip())
        lines.append("")

    lines.append(f"# {model.title}")
    lines.append("")

    if model.workflow:
        url = workflow_url(model.workflow)
        lines.append(f"[![main]({url}/badge.svg)]({url})")
        lines.append("")

    if model.subtitle:
        lines.append(f"#### {model.subtitle}")
        lines.append("")

    lines.append(standard_text["example_suite"].strip())
    lines.append("")
    lines.append("#### Contents")
    lines.append("")

    add_toc_entry("Overview", model.overview)
    add_toc_entry("Prerequisites")

    for step in model.steps:
        add_toc_entry(step_heading(step))

    add_toc_entry("Summary")
    add_toc_entry("Next steps")
    add_toc_entry("About this example")

    lines.append("")

    add_section("Overview", model.overview)
    add_section("Prerequisites", model.prerequisites)

    for step in model.steps:
        add_section(step_heading(step), generate_readme_step(model, step))

    add_section("Summary", model.summary)
    add_section("Next steps", model.next_steps)
    add_section("About this example", standard_text["about_this_example"].strip())

    write(output_file, "\n".join(lines).strip() + "\n")
def generate_readme_step(model, step):
    """Render one step as markdown: preamble, per-site command blocks with
    sample output, and postamble."""
    notice(f"Generating {step}")

    lines = list()

    if step.preamble:
        lines.append(step.preamble.strip())
        lines.append("")

    for site_name, commands in step.commands:
        site = dict(model.sites)[site_name]
        samples = list()

        lines.append(f"_**{site.title}:**_")
        lines.append("")
        lines.append("~~~ shell")

        for command in commands:
            # Commands marked apply=test run during testing but are not documented
            if command.apply == "test":
                continue

            if command.run:
                lines.append(command.run)

            if command.output:
                assert command.run

                samples.append((command.run, command.output))

        lines.append("~~~")
        lines.append("")

        if samples:
            lines.append("_Sample output:_")
            lines.append("")
            lines.append("~~~ console")
            lines.append("\n\n".join((f"$ {run}\n{output.strip()}" for run, output in samples)))
            lines.append("~~~")
            lines.append("")

    if step.postamble:
        lines.append(step.postamble.strip())

    return "\n".join(lines).strip()

def apply_kubeconfigs(model, kubeconfigs):
    """Assign user-supplied kubeconfig paths to the Kubernetes sites, in order.

    Fails when fewer kubeconfigs than Kubernetes sites are provided.
    """
    kube_sites = [x for _, x in model.sites if x.platform == "kubernetes"]

    if kubeconfigs and len(kubeconfigs) < len(kube_sites):
        fail("The provided kubeconfigs are fewer than the number of Kubernetes sites")

    for site, kubeconfig in zip(kube_sites, kubeconfigs):
        site.env["KUBECONFIG"] = kubeconfig

def apply_standard_steps(model):
    """Expand 'standard: <name>' references in steps using the bundled
    standardsteps.yaml definitions, substituting site variables."""
    notice("Applying standard steps")

    for step in model.steps:
        if "standard" not in step.data:
            continue

        standard_step_name = step.data["standard"]

        try:
            standard_step_data = standard_steps[standard_step_name]
        except KeyError:
            fail(f"Standard step '{standard_step_name}' not found")

        del step.data["standard"]

        def apply_attribute(name, default=None):
            # Fill in an attribute from the standard step unless the step
            # already overrides it, expanding @siteN@/@namespaceN@ placeholders
            if name not in step.data:
                value = standard_step_data.get(name, default)

                if value and name in ("title", "preamble", "postamble"):
                    for i, site in enumerate([x for _, x in model.sites]):
                        value = value.replace(f"@site{i}@", site.title)

                        if site.namespace:
                            value = value.replace(f"@namespace{i}@", site.namespace)

                step.data[name] = value

        apply_attribute("name")
        apply_attribute("title")
        apply_attribute("numbered", True)
        apply_attribute("preamble")
        apply_attribute("postamble")

        platform = standard_step_data.get("platform")

        if "commands" not in step.data and "commands" in standard_step_data:
            step.data["commands"] = dict()

            for i, item in enumerate(dict(model.sites).items()):
                site_name, site = item

                if platform and site.platform != platform:
                    continue

                if str(i) in standard_step_data["commands"]:
                    # A specific site index in the standard commands
                    commands = standard_step_data["commands"][str(i)]
                    step.data["commands"][site_name] = resolve_command_variables(commands, site)
                elif "*" in standard_step_data["commands"]:
                    # The wildcard entry applies to any remaining site
                    commands = standard_step_data["commands"]["*"]
                    step.data["commands"][site_name] = resolve_command_variables(commands, site)
                else:
                    # Otherwise, omit commands for this site
                    continue
def resolve_command_variables(commands, site):
    """Return copies of the command dicts with @kubeconfig@ and @namespace@
    placeholders expanded for Kubernetes sites."""
    def substitute(text):
        if site.platform == "kubernetes":
            text = text.replace("@kubeconfig@", site.env["KUBECONFIG"])
            text = text.replace("@namespace@", site.namespace)

        return text

    resolved = list()

    for command in commands:
        new_command = dict(command)

        if "run" in command:
            new_command["run"] = substitute(command["run"])

        if "output" in command:
            new_command["output"] = substitute(command["output"])

        resolved.append(new_command)

    return resolved

def get_github_owner_repo():
    """Derive the (owner, repo) pair from this clone's git origin URL.

    Handles both SSH ('git@github.com:owner/repo.git') and HTTP(S) forms.
    """
    check_program("git")

    url = call("git remote get-url origin", quiet=True)
    result = parse_url(url)

    if result.scheme == "" and result.path.startswith("git@github.com:"):
        path = remove_prefix(result.path, "git@github.com:")
        path = remove_suffix(path, ".git")

        return path.split("/", 1)

    if result.scheme in ("http", "https") and result.netloc == "github.com":
        path = remove_prefix(result.path, "/")

        return path.split("/", 1)

    fail("Unknown origin URL format")

def object_property(name, default=None):
    """Build a read-only property backed by the owner's 'data' dict."""
    def get(obj):
        return obj.data.get(name, default)

    return property(get)

def check_required_attributes(obj, *names):
    """Fail when any of the named attributes is absent from obj.data."""
    for name in names:
        if name not in obj.data:
            fail(f"{obj} is missing required attribute '{name}'")

def check_unknown_attributes(obj):
    """Fail when obj.data contains a key with no matching class property."""
    known_attributes = dict(inspect.getmembers(obj.__class__, lambda x: isinstance(x, property)))

    for name in obj.data:
        if name not in known_attributes:
            fail(f"{obj} has unknown attribute '{name}'")
class Model:
    """The parsed skewer.yaml document, exposing sites and steps."""

    title = object_property("title")
    subtitle = object_property("subtitle")
    workflow = object_property("workflow", "main.yaml")
    overview = object_property("overview")
    prerequisites = object_property("prerequisites", standard_text["prerequisites"].strip())
    summary = object_property("summary")
    next_steps = object_property("next_steps", standard_text["next_steps"].strip())

    def __init__(self, skewer_file, kubeconfigs=[]):
        self.skewer_file = skewer_file
        self.data = read_yaml(self.skewer_file)

        apply_kubeconfigs(self, kubeconfigs)
        apply_standard_steps(self)

    def __repr__(self):
        return f"model '{self.skewer_file}'"

    def check(self):
        """Validate the model and everything it contains."""
        check_required_attributes(self, "title", "sites", "steps")
        check_unknown_attributes(self)

        for _, site in self.sites:
            site.check()

        for step in self.steps:
            step.check()

    @property
    def sites(self):
        # Yields (name, Site) pairs; Site objects are built fresh per access
        for name, data in self.data["sites"].items():
            yield name, Site(self, data, name)

    @property
    def steps(self):
        for data in self.data["steps"]:
            yield Step(self, data)

class Site:
    """One site definition; usable as a context manager that activates the
    site's logging context and environment variables."""

    platform = object_property("platform")
    namespace = object_property("namespace")
    env = object_property("env", dict())

    def __init__(self, model, data, name):
        assert name is not None

        self.model = model
        self.data = data
        self.name = name

    def __repr__(self):
        return f"site '{self.name}'"

    def __enter__(self):
        self._logging_context = logging_context(self.name)
        self._working_env = working_env(**self.env)

        self._logging_context.__enter__()
        self._working_env.__enter__()

        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Unwind in reverse order of entry
        self._working_env.__exit__(exc_type, exc_value, traceback)
        self._logging_context.__exit__(exc_type, exc_value, traceback)

    def check(self):
        """Validate platform-specific requirements for this site."""
        check_required_attributes(self, "platform")
        check_unknown_attributes(self)

        if self.platform not in ("kubernetes", "podman"):
            fail(f"{self} attribute 'platform' has an illegal value: {self.platform}")

        if self.platform == "kubernetes":
            check_required_attributes(self, "namespace")

            if "KUBECONFIG" not in self.env:
                fail(f"Kubernetes {self} has no KUBECONFIG environment variable")

        if self.platform == "podman":
            if "SKUPPER_PLATFORM" not in self.env:
                fail(f"Podman {self} has no SKUPPER_PLATFORM environment variable")

            platform = self.env["SKUPPER_PLATFORM"]

            if platform != "podman":
                fail(f"Podman {self} environment variable SKUPPER_PLATFORM has an illegal value: {platform}")

    @property
    def title(self):
        return self.data.get("title", capitalize(self.name))
class Step:
    """One step definition from the skewer file."""

    numbered = object_property("numbered", True)
    name = object_property("name")
    title = object_property("title")
    preamble = object_property("preamble")
    postamble = object_property("postamble")

    def __init__(self, model, data):
        self.model = model
        self.data = data

    def __repr__(self):
        return f"step {self.number} '{self.title}'"

    def check(self):
        """Validate this step and every command it contains."""
        check_required_attributes(self, "title")
        check_unknown_attributes(self)

        site_names = [x.name for _, x in self.model.sites]

        for site_name, commands in self.commands:
            if site_name not in site_names:
                fail(f"Unknown site name '{site_name}' in commands for {self}")

            for command in commands:
                command.check()

    @property
    def number(self):
        # 1-based position of this step in the document
        return self.model.data["steps"].index(self.data) + 1

    @property
    def commands(self):
        for site_name, commands in self.data.get("commands", dict()).items():
            yield site_name, [Command(self.model, data) for data in commands]

class Command:
    """One command entry within a step's per-site command list."""

    run = object_property("run")
    apply = object_property("apply")
    output = object_property("output")
    await_resource = object_property("await_resource")
    await_ingress = object_property("await_ingress")
    await_http_ok = object_property("await_http_ok")
    await_console_ok = object_property("await_console_ok")

    def __init__(self, model, data):
        self.model = model
        self.data = data

    def __repr__(self):
        if self.run:
            return f"command '{self.run.splitlines()[0]}'"

        return "command"

    def check(self):
        check_unknown_attributes(self)

class Minikube:
    """Context manager that runs the example against a local Minikube
    profile named 'skewer', tearing it down on exit."""

    def __init__(self, skewer_file):
        self.skewer_file = skewer_file
        self.kubeconfigs = list()
        self.work_dir = join(get_user_temp_dir(), "skewer")

    def __enter__(self):
        notice("Starting Minikube")

        check_environment()
        check_program("minikube")

        profile_data = parse_json(call("minikube profile list --output json", quiet=True))

        for profile in profile_data.get("valid", []):
            if profile["Name"] == "skewer":
                fail("A Minikube profile 'skewer' already exists. Delete it using 'minikube delete -p skewer'.")

        remove(self.work_dir, quiet=True)
        make_dir(self.work_dir, quiet=True)

        run("minikube start -p skewer --auto-update-drivers false")

        # Fixed: the tunnel output file was held only in a local variable and
        # never closed; keep it on self so __exit__ can close it
        self.tunnel_output_file = open(f"{self.work_dir}/minikube-tunnel-output", "w")
        self.tunnel = start("minikube tunnel -p skewer", output=self.tunnel_output_file)

        model = Model(self.skewer_file)
        model.check()

        kube_sites = [x for _, x in model.sites if x.platform == "kubernetes"]

        for site in kube_sites:
            # '~' in kubeconfig paths is a placeholder for the working directory
            kubeconfig = site.env["KUBECONFIG"].replace("~", self.work_dir)
            site.env["KUBECONFIG"] = kubeconfig

            self.kubeconfigs.append(kubeconfig)

            with site:
                run("minikube update-context -p skewer")
                check_file(ENV["KUBECONFIG"])

        return self

    def __exit__(self, exc_type, exc_value, traceback):
        notice("Stopping Minikube")

        try:
            stop(self.tunnel)
        finally:
            # Always release the log file handle and delete the cluster,
            # even if stopping the tunnel process fails
            self.tunnel_output_file.close()
            run("minikube delete -p skewer")
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +from plano import * +from plano.github import * +from skewer import * + +_debug_param = CommandParameter("debug", help="Produce extra debug output on failure") + +@command +def generate(output="README.md"): + """ + Generate README.md from the data in skewer.yaml + """ + generate_readme("skewer.yaml", output) + +@command +def render(quiet=False): + """ + Render README.html from README.md + """ + generate() + + markdown = read("README.md") + html = convert_github_markdown(markdown) + + write("README.html", html) + + if not quiet: + print(f"file:{get_real_path('README.html')}") + +@command +def clean(): + remove(find(".", "__pycache__")) + remove("README.html") + +@command(parameters=[_debug_param]) +def run_(*kubeconfigs, debug=False): + """ + Run the example steps + + If no kubeconfigs are provided, Skewer starts a local Minikube + instance and runs the steps using it. 
+ """ + if not kubeconfigs: + with Minikube("skewer.yaml") as mk: + run_steps("skewer.yaml", kubeconfigs=mk.kubeconfigs, work_dir=mk.work_dir, debug=debug) + else: + run_steps("skewer.yaml", kubeconfigs=kubeconfigs, debug=debug) + +@command(parameters=[_debug_param]) +def demo(*kubeconfigs, debug=False): + """ + Run the example steps and pause for a demo before cleaning up + """ + with working_env(SKEWER_DEMO=1): + run_(*kubeconfigs, debug=debug) + +@command(parameters=[_debug_param]) +def test_(debug=False): + """ + Test README generation and run the steps on Minikube + """ + generate(output=make_temp_file()) + run_(debug=debug) + +@command +def update_skewer(): + """ + Update the embedded Skewer repo and GitHub workflow + + This results in local changes to review and commit. + """ + update_external_from_github("external/skewer", "skupperproject", "skewer") + copy("external/skewer/config/.github/workflows/main.yaml", ".github/workflows/main.yaml") diff --git a/external/skewer/python/skewer/standardsteps.yaml b/external/skewer/python/skewer/standardsteps.yaml new file mode 100644 index 0000000..adc1620 --- /dev/null +++ b/external/skewer/python/skewer/standardsteps.yaml @@ -0,0 +1,330 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. +# + +install_the_skupper_command_line_tool: + title: Install the Skupper command-line tool + preamble: | + This example uses the Skupper command-line tool to deploy Skupper. + You need to install the `skupper` command only once for each + development environment. + + On Linux or Mac, you can use the install script (inspect it + [here][install-script]) to download and extract the command: + + ~~~ shell + curl https://skupper.io/install.sh | sh + ~~~ + + The script installs the command under your home directory. It + prompts you to add the command to your path if necessary. + + For Windows and other installation options, see [Installing + Skupper][install-docs]. + + [install-script]: https://github.com/skupperproject/skupper-website/blob/main/input/install.sh + [install-docs]: https://skupper.io/install/ +kubernetes/set_up_your_namespaces: + title: Set up your namespaces + platform: kubernetes + preamble: | + Skupper is designed for use with multiple Kubernetes namespaces, + usually on different clusters. The `skupper` and `kubectl` + commands use your [kubeconfig][kubeconfig] and current context to + select the namespace where they operate. + + [kubeconfig]: https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/ + + Your kubeconfig is stored in a file in your home directory. The + `skupper` and `kubectl` commands use the `KUBECONFIG` environment + variable to locate it. + + A single kubeconfig supports only one active context per user. + Since you will be using multiple contexts at once in this + exercise, you need to create distinct kubeconfigs. + + For each namespace, open a new terminal window. In each terminal, + set the `KUBECONFIG` environment variable to a different path and + log in to your cluster. Then create the namespace you wish to use + and set the namespace on your current context. 
+ + **Note:** The login procedure varies by provider. See the + documentation for yours: + + * [Minikube](https://skupper.io/start/minikube.html#cluster-access) + * [Amazon Elastic Kubernetes Service (EKS)](https://skupper.io/start/eks.html#cluster-access) + * [Azure Kubernetes Service (AKS)](https://skupper.io/start/aks.html#cluster-access) + * [Google Kubernetes Engine (GKE)](https://skupper.io/start/gke.html#cluster-access) + * [IBM Kubernetes Service](https://skupper.io/start/ibmks.html#cluster-access) + * [OpenShift](https://skupper.io/start/openshift.html#cluster-access) + commands: + "*": + - run: export KUBECONFIG=@kubeconfig@ + - run: "# Enter your provider-specific login command" + - run: kubectl create namespace @namespace@ + apply: readme + - run: kubectl create namespace @namespace@ --dry-run=client -o yaml | kubectl apply -f - + apply: test + - run: kubectl config set-context --current --namespace @namespace@ +kubernetes/set_up_your_kubernetes_namespace: + title: Set up your Kubernetes namespace + platform: kubernetes + preamble: | + Open a new terminal window and log in to your cluster. Then + create the namespace you wish to use and set the namespace on your + current context. + + **Note:** The login procedure varies by provider. 
See the + documentation for your chosen providers: + + * [Minikube](https://skupper.io/start/minikube.html#cluster-access) + * [Amazon Elastic Kubernetes Service (EKS)](https://skupper.io/start/eks.html#cluster-access) + * [Azure Kubernetes Service (AKS)](https://skupper.io/start/aks.html#cluster-access) + * [Google Kubernetes Engine (GKE)](https://skupper.io/start/gke.html#cluster-access) + * [IBM Kubernetes Service](https://skupper.io/start/ibmks.html#cluster-access) + * [OpenShift](https://skupper.io/start/openshift.html#cluster-access) + commands: + "*": + - run: "# Enter your provider-specific login command" + - run: kubectl create namespace @namespace@ + - run: kubectl config set-context --current --namespace @namespace@ +kubernetes/create_your_sites: + title: Create your sites + platform: kubernetes + preamble: | + A Skupper _site_ is a location where components of your + application are running. Sites are linked together to form a + network for your application. In Kubernetes, a site is associated + with a namespace. + + For each namespace, use `skupper init` to create a site. This + deploys the Skupper router and controller. Then use `skupper + status` to see the outcome. + + **Note:** If you are using Minikube, you need to [start minikube + tunnel][minikube-tunnel] before you run `skupper init`. + + [minikube-tunnel]: https://skupper.io/start/minikube.html#running-minikube-tunnel + commands: + "*": + - run: skupper init + output: | + Waiting for LoadBalancer IP or hostname... + Waiting for status... + Skupper is now installed in namespace '@namespace@'. Use 'skupper status' to get more information. + - run: skupper status + output: | + Skupper is enabled for namespace "@namespace@". It is not connected to any other sites. It has no exposed services. + postamble: | + As you move through the steps below, you can use `skupper status` at + any time to check your progress. 
+podman/set_up_your_podman_network: + title: Set up your Podman network + platform: podman + preamble: | + Open a new terminal window and set the `SKUPPER_PLATFORM` + environment variable to `podman`. This sets the Skupper platform + to Podman for this terminal session. + + Use `podman network create` to create the Podman network that + Skupper will use. + + Use `systemctl` to enable the Podman API service. + commands: + "*": + - run: export SKUPPER_PLATFORM=podman + - run: podman network create skupper + apply: readme + - run: if ! podman network exists skupper; then podman network create skupper; fi + apply: test + - run: systemctl --user enable --now podman.socket + postamble: | + If the `systemctl` command doesn't work, you can try the `podman + system service` command instead: + + ~~~ + podman system service --time=0 unix://$XDG_RUNTIME_DIR/podman/podman.sock & + ~~~ +link_your_sites: + title: Link your sites + preamble: | + A Skupper _link_ is a channel for communication between two sites. + Links serve as a transport for application connections and + requests. + + Creating a link requires use of two `skupper` commands in + conjunction, `skupper token create` and `skupper link create`. + + The `skupper token create` command generates a secret token that + signifies permission to create a link. The token also carries the + link details. Then, in a remote site, The `skupper link + create` command uses the token to create a link to the site + that generated it. + + **Note:** The link token is truly a *secret*. Anyone who has the + token can link to your site. Make sure that only those you trust + have access to it. + + First, use `skupper token create` in site @site0@ to generate the + token. Then, use `skupper link create` in site @site1@ to link + the sites. 
+ commands: + "0": + - run: skupper token create ~/secret.token + output: Token written to ~/secret.token + "1": + - run: skupper link create ~/secret.token + output: | + Site configured to link to (name=link1) + Check the status of the link using 'skupper link status'. + - run: skupper link status --wait 60 + apply: test + postamble: | + If your terminal sessions are on different machines, you may need + to use `scp` or a similar tool to transfer the token securely. By + default, tokens expire after a single use or 15 minutes after + creation. +kubernetes/link_your_sites: + title: Link your sites + platform: kubernetes + preamble: | + A Skupper _link_ is a channel for communication between two sites. + Links serve as a transport for application connections and + requests. + + Creating a link requires use of two `skupper` commands in + conjunction, `skupper token create` and `skupper link create`. + + The `skupper token create` command generates a secret token that + signifies permission to create a link. The token also carries the + link details. Then, in a remote site, The `skupper link + create` command uses the token to create a link to the site + that generated it. + + **Note:** The link token is truly a *secret*. Anyone who has the + token can link to your site. Make sure that only those you trust + have access to it. + + First, use `skupper token create` in site @site0@ to generate the + token. Then, use `skupper link create` in site @site1@ to link + the sites. + commands: + "0": + - run: skupper token create ~/secret.token + output: Token written to ~/secret.token + "1": + - run: skupper link create ~/secret.token + output: | + Site configured to link to https://10.105.193.154:8081/ed9c37f6-d78a-11ec-a8c7-04421a4c5042 (name=link1) + Check the status of the link using 'skupper link status'. 
+ - run: skupper link status --wait 60 + apply: test + postamble: | + If your terminal sessions are on different machines, you may need + to use `scp` or a similar tool to transfer the token securely. By + default, tokens expire after a single use or 15 minutes after + creation. +cleaning_up: + name: cleaning_up + title: Cleaning up + numbered: false + preamble: | + To remove Skupper and the other resources from this exercise, use + the following commands. + commands: + "*": + - run: skupper delete +hello_world/deploy_the_frontend_and_backend: + title: Deploy the frontend and backend + preamble: | + This example runs the frontend and the backend in separate + Kubernetes namespaces, on different clusters. + + Use `kubectl create deployment` to deploy the frontend in + namespace `@namespace0@` and the backend in namespace + `@namespace1@`. + commands: + "0": + - run: kubectl create deployment frontend --image quay.io/skupper/hello-world-frontend + "1": + - run: kubectl create deployment backend --image quay.io/skupper/hello-world-backend --replicas 3 +hello_world/expose_the_backend: + title: Expose the backend + preamble: | + We now have our sites linked to form a Skupper network, but no + services are exposed on it. Skupper uses the `skupper expose` + command to select a service from one site for exposure in all the + linked sites. + + Use `skupper expose` to expose the backend service in @site1@ to + the frontend in @site0@. + commands: + "1": + - await_resource: deployment/backend + - run: skupper expose deployment/backend --port 8080 + output: deployment backend exposed as backend +hello_world/access_the_frontend: + title: Access the frontend + preamble: | + In order to use and test the application, we need external access + to the frontend. + + Use `kubectl expose` with `--type LoadBalancer` to open network + access to the frontend service. + + Once the frontend is exposed, use `kubectl get service/frontend` + to look up the external IP of the frontend service. 
If the + external IP is ``, try again after a moment. + + Once you have the external IP, use `curl` or a similar tool to + request the `/api/health` endpoint at that address. + + **Note:** The `` field in the following commands is a + placeholder. The actual value is an IP address. + commands: + "0": + - run: kubectl expose deployment/frontend --port 8080 --type LoadBalancer + output: service/frontend exposed + - await_resource: service/frontend + - run: kubectl get service/frontend + apply: readme + output: | + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + frontend LoadBalancer 10.103.232.28 8080:30407/TCP 15s + - run: curl http://:8080/api/health + apply: readme + output: OK + - await_http_ok: [service/frontend, "http://{}:8080/api/health"] + postamble: | + If everything is in order, you can now access the web interface by + navigating to `http://:8080/` in your browser. +hello_world/cleaning_up: + name: cleaning_up + title: Cleaning up + numbered: false + preamble: | + To remove Skupper and the other resources from this exercise, use + the following commands: + commands: + "0": + - run: skupper delete + - run: kubectl delete service/frontend + - run: kubectl delete deployment/frontend + "1": + - run: skupper delete + - run: kubectl delete deployment/backend diff --git a/external/skewer/python/skewer/standardtext.yaml b/external/skewer/python/skewer/standardtext.yaml new file mode 100644 index 0000000..add76a2 --- /dev/null +++ b/external/skewer/python/skewer/standardtext.yaml @@ -0,0 +1,49 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +example_suite: | + This example is part of a [suite of examples][examples] showing the + different ways you can use [Skupper][website] to connect services + across cloud providers, data centers, and edge sites. + + [website]: https://skupper.io/ + [examples]: https://skupper.io/examples/index.html +prerequisites: | + * The `kubectl` command-line tool, version 1.15 or later + ([installation guide][install-kubectl]) + + * Access to at least one Kubernetes cluster, from [any provider you + choose][kube-providers] + + [install-kubectl]: https://kubernetes.io/docs/tasks/tools/install-kubectl/ + [kube-providers]: https://skupper.io/start/kubernetes.html +next_steps: | + Check out the other [examples][examples] on the Skupper website. +about_this_example: | + This example was produced using [Skewer][skewer], a library for + documenting and testing Skupper examples. + + [skewer]: https://github.com/skupperproject/skewer + + Skewer provides utility functions for generating the README and + running the example steps. Use the `./plano` command in the project + root to see what is available. + + To quickly stand up the example using Minikube, try the `./plano demo` + command. diff --git a/external/skewer/python/skewer/tests.py b/external/skewer/python/skewer/tests.py new file mode 100644 index 0000000..7fa00b6 --- /dev/null +++ b/external/skewer/python/skewer/tests.py @@ -0,0 +1,67 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +from plano import * +from skewer import * + +@test +def plano_commands(): + with working_dir("example"): + run("./plano") + run("./plano generate") + run("./plano render") + run("./plano clean") + +@test +def config_files(): + check_file("config/.github/workflows/main.yaml") + check_file("config/.gitignore") + check_file("config/.plano.py") + + parse_yaml(read("config/.github/workflows/main.yaml")) + +@test +def generate_readme_(): + with working_dir("example"): + generate_readme("skewer.yaml", "README.md") + check_file("README.md") + +@test +def run_steps_(): + with working_dir("example"): + with Minikube("skewer.yaml") as mk: + run_steps("skewer.yaml", kubeconfigs=mk.kubeconfigs, work_dir=mk.work_dir, debug=True) + +@test +def run_steps_demo(): + with working_dir("example"): + with Minikube("skewer.yaml") as mk: + run_steps("skewer.yaml", kubeconfigs=mk.kubeconfigs, work_dir=mk.work_dir, debug=True) + +@test +def run_steps_debug(): + with working_dir("example"): + with expect_error(): + with working_env(SKEWER_FAIL=1): + with Minikube("skewer.yaml") as mk: + run_steps("skewer.yaml", kubeconfigs=mk.kubeconfigs, work_dir=mk.work_dir, debug=True) + +if __name__ == "__main__": + import sys + run_tests(sys.modules[__name__]) diff --git a/frontend/kubernetes.yaml 
b/frontend/kubernetes.yaml index f59ef88..3fe04c5 100644 --- a/frontend/kubernetes.yaml +++ b/frontend/kubernetes.yaml @@ -22,18 +22,18 @@ spec: value: cluster1-kafka-brokers:9092 ports: - containerPort: 8080 ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app: frontend - name: frontend -spec: - ports: - - port: 8080 - protocol: TCP - targetPort: 8080 - selector: - app: frontend - type: LoadBalancer +# --- +# apiVersion: v1 +# kind: Service +# metadata: +# labels: +# app: frontend +# name: frontend +# spec: +# ports: +# - port: 8080 +# protocol: TCP +# targetPort: 8080 +# selector: +# app: frontend +# type: LoadBalancer diff --git a/kafka-cluster/cluster1.yaml b/kafka-cluster/cluster1.yaml index dd8bdc0..59d1c76 100644 --- a/kafka-cluster/cluster1.yaml +++ b/kafka-cluster/cluster1.yaml @@ -4,7 +4,7 @@ metadata: name: cluster1 spec: kafka: - version: 2.8.0 + version: 3.4.0 replicas: 1 listeners: - name: plain @@ -15,18 +15,68 @@ spec: brokers: - broker: 0 advertisedHost: cluster1-kafka-0.cluster1-kafka-brokers + - name: tls + port: 9093 + type: internal + tls: true config: offsets.topic.replication.factor: 1 transaction.state.log.replication.factor: 1 transaction.state.log.min.isr: 1 - log.message.format.version: "2.8" - inter.broker.protocol.version: "2.8" + default.replication.factor: 1 + min.insync.replicas: 1 + inter.broker.protocol.version: "3.4" storage: type: ephemeral zookeeper: replicas: 1 storage: type: ephemeral + entityOperator: + topicOperator: {} + userOperator: {} +# --- +# apiVersion: kafka.strimzi.io/v1beta2 +# kind: KafkaTopic +# metadata: +# name: topic1 +# labels: +# strimzi.io/cluster: cluster1 +# spec: +# partitions: 1 +# replicas: 1 +# config: +# retention.ms: 1000 +# segment.bytes: 104857600 +# apiVersion: kafka.strimzi.io/v1beta2 +# kind: Kafka +# metadata: +# name: cluster1 +# spec: +# kafka: +# version: 3.4.0 +# replicas: 1 +# listeners: +# - name: plain +# port: 9092 +# type: internal +# tls: false +# configuration: +# brokers: 
+# - broker: 0 +# advertisedHost: cluster1-kafka-0.cluster1-kafka-brokers +# config: +# offsets.topic.replication.factor: 1 +# transaction.state.log.replication.factor: 1 +# transaction.state.log.min.isr: 1 +# log.message.format.version: "2.8" +# inter.broker.protocol.version: "2.8" +# storage: +# type: ephemeral +# zookeeper: +# replicas: 1 +# storage: +# type: ephemeral --- apiVersion: kafka.strimzi.io/v1beta2 kind: KafkaTopic diff --git a/kafka-cluster/strimzi.yaml b/kafka-cluster/strimzi.yaml index a860193..839a68b 100644 --- a/kafka-cluster/strimzi.yaml +++ b/kafka-cluster/strimzi.yaml @@ -1,20 +1,191 @@ + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: strimzi-cluster-operator + labels: + app: strimzi + namespace: private +spec: + replicas: 1 + selector: + matchLabels: + name: strimzi-cluster-operator + strimzi.io/kind: cluster-operator + template: + metadata: + labels: + name: strimzi-cluster-operator + strimzi.io/kind: cluster-operator + spec: + serviceAccountName: strimzi-cluster-operator + volumes: + - name: strimzi-tmp + emptyDir: + medium: Memory + sizeLimit: 1Mi + - name: co-config-volume + configMap: + name: strimzi-cluster-operator + containers: + - name: strimzi-cluster-operator + image: 'quay.io/strimzi/operator:0.34.0' + ports: + - containerPort: 8080 + name: http + args: + - /opt/strimzi/bin/cluster_operator_run.sh + volumeMounts: + - name: strimzi-tmp + mountPath: /tmp + - name: co-config-volume + mountPath: /opt/strimzi/custom-config/ + env: + - name: STRIMZI_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: STRIMZI_FULL_RECONCILIATION_INTERVAL_MS + value: '120000' + - name: STRIMZI_OPERATION_TIMEOUT_MS + value: '300000' + - name: STRIMZI_DEFAULT_TLS_SIDECAR_ENTITY_OPERATOR_IMAGE + value: 'quay.io/strimzi/kafka:0.34.0-kafka-3.4.0' + - name: STRIMZI_DEFAULT_KAFKA_EXPORTER_IMAGE + value: 'quay.io/strimzi/kafka:0.34.0-kafka-3.4.0' + - name: STRIMZI_DEFAULT_CRUISE_CONTROL_IMAGE + value: 
'quay.io/strimzi/kafka:0.34.0-kafka-3.4.0' + - name: STRIMZI_KAFKA_IMAGES + value: | + 3.3.1=quay.io/strimzi/kafka:0.34.0-kafka-3.3.1 + 3.3.2=quay.io/strimzi/kafka:0.34.0-kafka-3.3.2 + 3.4.0=quay.io/strimzi/kafka:0.34.0-kafka-3.4.0 + - name: STRIMZI_KAFKA_CONNECT_IMAGES + value: | + 3.3.1=quay.io/strimzi/kafka:0.34.0-kafka-3.3.1 + 3.3.2=quay.io/strimzi/kafka:0.34.0-kafka-3.3.2 + 3.4.0=quay.io/strimzi/kafka:0.34.0-kafka-3.4.0 + - name: STRIMZI_KAFKA_MIRROR_MAKER_IMAGES + value: | + 3.3.1=quay.io/strimzi/kafka:0.34.0-kafka-3.3.1 + 3.3.2=quay.io/strimzi/kafka:0.34.0-kafka-3.3.2 + 3.4.0=quay.io/strimzi/kafka:0.34.0-kafka-3.4.0 + - name: STRIMZI_KAFKA_MIRROR_MAKER_2_IMAGES + value: | + 3.3.1=quay.io/strimzi/kafka:0.34.0-kafka-3.3.1 + 3.3.2=quay.io/strimzi/kafka:0.34.0-kafka-3.3.2 + 3.4.0=quay.io/strimzi/kafka:0.34.0-kafka-3.4.0 + - name: STRIMZI_DEFAULT_TOPIC_OPERATOR_IMAGE + value: 'quay.io/strimzi/operator:0.34.0' + - name: STRIMZI_DEFAULT_USER_OPERATOR_IMAGE + value: 'quay.io/strimzi/operator:0.34.0' + - name: STRIMZI_DEFAULT_KAFKA_INIT_IMAGE + value: 'quay.io/strimzi/operator:0.34.0' + - name: STRIMZI_DEFAULT_KAFKA_BRIDGE_IMAGE + value: 'quay.io/strimzi/kafka-bridge:0.25.0' + - name: STRIMZI_DEFAULT_JMXTRANS_IMAGE + value: 'quay.io/strimzi/jmxtrans:0.34.0' + - name: STRIMZI_DEFAULT_KANIKO_EXECUTOR_IMAGE + value: 'quay.io/strimzi/kaniko-executor:0.34.0' + - name: STRIMZI_DEFAULT_MAVEN_BUILDER + value: 'quay.io/strimzi/maven-builder:0.34.0' + - name: STRIMZI_OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: STRIMZI_FEATURE_GATES + value: '-UseStrimziPodSets' + - name: STRIMZI_LEADER_ELECTION_ENABLED + value: 'true' + - name: STRIMZI_LEADER_ELECTION_LEASE_NAME + value: strimzi-cluster-operator + - name: STRIMZI_LEADER_ELECTION_LEASE_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: STRIMZI_LEADER_ELECTION_IDENTITY + valueFrom: + fieldRef: + fieldPath: metadata.name + livenessProbe: + httpGet: + path: 
/healthy + port: http + initialDelaySeconds: 10 + periodSeconds: 30 + readinessProbe: + httpGet: + path: /ready + port: http + initialDelaySeconds: 10 + periodSeconds: 30 + resources: + limits: + cpu: 1000m + memory: 384Mi + requests: + cpu: 200m + memory: 384Mi + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: strimzi-kafka-broker + labels: + app: strimzi +rules: + - apiGroups: + - '' + resources: + - nodes + verbs: + - get + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: strimzi-cluster-operator-leader-election + labels: + app: strimzi + namespace: private +subjects: + - kind: ServiceAccount + name: strimzi-cluster-operator + namespace: private +roleRef: + kind: ClusterRole + name: strimzi-cluster-operator-leader-election + apiGroup: rbac.authorization.k8s.io + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: strimzi-cluster-operator + labels: + app: strimzi + namespace: private + --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: kafkas.kafka.strimzi.io + name: kafkaconnects.kafka.strimzi.io labels: app: strimzi strimzi.io/crd-install: 'true' spec: group: kafka.strimzi.io names: - kind: Kafka - listKind: KafkaList - singular: kafka - plural: kafkas + kind: KafkaConnect + listKind: KafkaConnectList + singular: kafkaconnect + plural: kafkaconnects shortNames: - - k + - kc categories: - strimzi scope: Namespaced @@ -26,23 +197,19 @@ spec: storage: true subresources: status: {} + scale: + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + labelSelectorPath: .status.labelSelector additionalPrinterColumns: - - name: Desired Kafka replicas - description: The desired number of Kafka replicas in the cluster - jsonPath: .spec.kafka.replicas - type: integer - - name: Desired ZK replicas - description: The desired number of ZooKeeper replicas in the cluster - jsonPath: .spec.zookeeper.replicas + - name: Desired replicas + 
description: The desired number of Kafka Connect replicas + jsonPath: .spec.replicas type: integer - name: Ready description: The state of the custom resource jsonPath: '.status.conditions[?(@.type=="Ready")].status' type: string - - name: Warnings - description: Warnings related to the custom resource - jsonPath: '.status.conditions[?(@.type=="Warning")].status' - type: string schema: openAPIV3Schema: type: object @@ -50,8369 +217,263 @@ spec: spec: type: object properties: - kafka: + version: + type: string + description: >- + The Kafka Connect version. Defaults to + {DefaultKafkaVersion}. Consult the user documentation to + understand the process required to upgrade or downgrade the + version. + replicas: + type: integer + description: The number of pods in the Kafka Connect group. + image: + type: string + description: The docker image for the pods. + bootstrapServers: + type: string + description: >- + Bootstrap servers to connect to. This should be given as a + comma separated list of __:__ pairs. + tls: type: object properties: - version: - type: string - description: >- - The kafka broker version. Defaults to - {DefaultKafkaVersion}. Consult the user documentation to - understand the process required to upgrade or downgrade - the version. - replicas: - type: integer - minimum: 1 - description: The number of pods in the cluster. - image: - type: string - description: >- - The docker image for the pods. The default value depends - on the configured `Kafka.spec.kafka.version`. - listeners: + trustedCertificates: type: array - minItems: 1 items: type: object properties: - name: + certificate: type: string - pattern: '^[a-z0-9]{1,11}$' - description: >- - Name of the listener. The name will be used to - identify the listener and the related Kubernetes - objects. The name has to be unique within given a - Kafka cluster. The name can consist of lowercase - characters and numbers and be up to 11 characters - long. 
- port: - type: integer - minimum: 9092 - description: >- - Port number used by the listener inside Kafka. The - port number has to be unique within a given Kafka - cluster. Allowed port numbers are 9092 and higher - with the exception of ports 9404 and 9999, which - are already used for Prometheus and JMX. Depending - on the listener type, the port number might not be - the same as the port number that connects Kafka - clients. - type: + description: The name of the file certificate in the Secret. + secretName: type: string - enum: - - internal - - route - - loadbalancer - - nodeport - - ingress - description: > - Type of the listener. Currently the supported - types are `internal`, `route`, `loadbalancer`, - `nodeport` and `ingress`. - - - * `internal` type exposes Kafka internally only - within the Kubernetes cluster. - - * `route` type uses OpenShift Routes to expose - Kafka. - - * `loadbalancer` type uses LoadBalancer type - services to expose Kafka. - - * `nodeport` type uses NodePort type services to - expose Kafka. - - * `ingress` type uses Kubernetes Nginx Ingress to - expose Kafka. - tls: - type: boolean - description: >- - Enables TLS encryption on the listener. This is a - required property. - authentication: - type: object - properties: - accessTokenIsJwt: - type: boolean - description: >- - Configure whether the access token is treated - as JWT. This must be set to `false` if the - authorization server returns opaque tokens. - Defaults to `true`. - checkAccessTokenType: - type: boolean - description: >- - Configure whether the access token type check - is performed or not. This should be set to - `false` if the authorization server does not - include 'typ' claim in JWT token. Defaults to - `true`. - checkAudience: - type: boolean - description: >- - Enable or disable audience checking. Audience - checks identify the recipients of tokens. If - audience checking is enabled, the OAuth Client - ID also has to be configured using the - `clientId` property. 
The Kafka broker will - reject tokens that do not have its `clientId` - in their `aud` (audience) claim.Default value - is `false`. - checkIssuer: - type: boolean - description: >- - Enable or disable issuer checking. By default - issuer is checked using the value configured - by `validIssuerUri`. Default value is `true`. - clientId: - type: string - description: >- - OAuth Client ID which the Kafka broker can use - to authenticate against the authorization - server and use the introspect endpoint URI. - clientSecret: - type: object - properties: - key: - type: string - description: >- - The key under which the secret value is - stored in the Kubernetes Secret. - secretName: - type: string - description: >- - The name of the Kubernetes Secret - containing the secret value. - required: - - key - - secretName - description: >- - Link to Kubernetes Secret containing the OAuth - client secret which the Kafka broker can use - to authenticate against the authorization - server and use the introspect endpoint URI. - customClaimCheck: - type: string - description: >- - JsonPath filter query to be applied to the JWT - token or to the response of the introspection - endpoint for additional token validation. Not - set by default. - disableTlsHostnameVerification: - type: boolean - description: >- - Enable or disable TLS hostname verification. - Default value is `false`. - enableECDSA: - type: boolean - description: >- - Enable or disable ECDSA support by installing - BouncyCastle crypto provider. Default value is - `false`. - enableOauthBearer: - type: boolean - description: >- - Enable or disable OAuth authentication over - SASL_OAUTHBEARER. Default value is `true`. - enablePlain: - type: boolean - description: >- - Enable or disable OAuth authentication over - SASL_PLAIN. There is no re-authentication - support when this mechanism is used. Default - value is `false`. 
- fallbackUserNameClaim: - type: string - description: >- - The fallback username claim to be used for the - user id if the claim specified by - `userNameClaim` is not present. This is useful - when `client_credentials` authentication only - results in the client id being provided in - another claim. It only takes effect if - `userNameClaim` is set. - fallbackUserNamePrefix: - type: string - description: >- - The prefix to use with the value of - `fallbackUserNameClaim` to construct the user - id. This only takes effect if - `fallbackUserNameClaim` is true, and the value - is present for the claim. Mapping usernames - and client ids into the same user id space is - useful in preventing name collisions. - introspectionEndpointUri: - type: string - description: >- - URI of the token introspection endpoint which - can be used to validate opaque non-JWT tokens. - jwksEndpointUri: - type: string - description: >- - URI of the JWKS certificate endpoint, which - can be used for local JWT validation. - jwksExpirySeconds: - type: integer - minimum: 1 - description: >- - Configures how often are the JWKS certificates - considered valid. The expiry interval has to - be at least 60 seconds longer then the refresh - interval specified in `jwksRefreshSeconds`. - Defaults to 360 seconds. - jwksMinRefreshPauseSeconds: - type: integer - minimum: 0 - description: >- - The minimum pause between two consecutive - refreshes. When an unknown signing key is - encountered the refresh is scheduled - immediately, but will always wait for this - minimum pause. Defaults to 1 second. - jwksRefreshSeconds: - type: integer - minimum: 1 - description: >- - Configures how often are the JWKS certificates - refreshed. The refresh interval has to be at - least 60 seconds shorter then the expiry - interval specified in `jwksExpirySeconds`. - Defaults to 300 seconds. 
- maxSecondsWithoutReauthentication: - type: integer - description: >- - Maximum number of seconds the authenticated - session remains valid without - re-authentication. This enables Apache Kafka - re-authentication feature, and causes sessions - to expire when the access token expires. If - the access token expires before max time or if - max time is reached, the client has to - re-authenticate, otherwise the server will - drop the connection. Not set by default - the - authenticated session does not expire when the - access token expires. This option only applies - to SASL_OAUTHBEARER authentication mechanism - (when `enableOauthBearer` is `true`). - tlsTrustedCertificates: - type: array - items: - type: object - properties: - certificate: - type: string - description: >- - The name of the file certificate in the - Secret. - secretName: - type: string - description: >- - The name of the Secret containing the - certificate. - required: - - certificate - - secretName - description: >- - Trusted certificates for TLS connection to the - OAuth server. - tokenEndpointUri: - type: string - description: >- - URI of the Token Endpoint to use with - SASL_PLAIN mechanism when the client - authenticates with clientId and a secret. - type: - type: string - enum: - - tls - - scram-sha-512 - - oauth - description: >- - Authentication type. `oauth` type uses SASL - OAUTHBEARER Authentication. `scram-sha-512` - type uses SASL SCRAM-SHA-512 Authentication. - `tls` type uses TLS Client Authentication. - `tls` type is supported only on TLS listeners. - userInfoEndpointUri: - type: string - description: >- - URI of the User Info Endpoint to use as a - fallback to obtaining the user id when the - Introspection Endpoint does not return - information that can be used for the user id. 
- userNameClaim: - type: string - description: >- - Name of the claim from the JWT authentication - token, Introspection Endpoint response or User - Info Endpoint response which will be used to - extract the user id. Defaults to `sub`. - validIssuerUri: - type: string - description: >- - URI of the token issuer used for - authentication. - validTokenType: - type: string - description: >- - Valid value for the `token_type` attribute - returned by the Introspection Endpoint. No - default value, and not checked by default. - required: - - type - description: Authentication configuration for this listener. - configuration: - type: object - properties: - brokerCertChainAndKey: - type: object - properties: - certificate: - type: string - description: >- - The name of the file certificate in the - Secret. - key: - type: string - description: The name of the private key in the Secret. - secretName: - type: string - description: >- - The name of the Secret containing the - certificate. - required: - - certificate - - key - - secretName - description: >- - Reference to the `Secret` which holds the - certificate and private key pair which will be - used for this listener. The certificate can - optionally contain the whole chain. This field - can be used only with listeners with enabled - TLS encryption. - externalTrafficPolicy: - type: string - enum: - - Local - - Cluster - description: >- - Specifies whether the service routes external - traffic to node-local or cluster-wide - endpoints. `Cluster` may cause a second hop to - another node and obscures the client source - IP. `Local` avoids a second hop for - LoadBalancer and Nodeport type services and - preserves the client source IP (when supported - by the infrastructure). If unspecified, - Kubernetes will use `Cluster` as the - default.This field can be used only with - `loadbalancer` or `nodeport` type listener. 
- loadBalancerSourceRanges: - type: array - items: - type: string - description: >- - A list of CIDR ranges (for example - `10.0.0.0/8` or `130.211.204.1/32`) from which - clients can connect to load balancer type - listeners. If supported by the platform, - traffic through the loadbalancer is restricted - to the specified CIDR ranges. This field is - applicable only for loadbalancer type services - and is ignored if the cloud provider does not - support the feature. For more information, see - https://v1-17.docs.kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/. - This field can be used only with - `loadbalancer` type listener. - bootstrap: - type: object - properties: - alternativeNames: - type: array - items: - type: string - description: >- - Additional alternative names for the - bootstrap service. The alternative names - will be added to the list of subject - alternative names of the TLS certificates. - host: - type: string - description: >- - The bootstrap host. This field will be - used in the Ingress resource or in the - Route resource to specify the desired - hostname. This field can be used only with - `route` (optional) or `ingress` (required) - type listeners. - nodePort: - type: integer - description: >- - Node port for the bootstrap service. This - field can be used only with `nodeport` - type listener. - loadBalancerIP: - type: string - description: >- - The loadbalancer is requested with the IP - address specified in this field. This - feature depends on whether the underlying - cloud provider supports specifying the - `loadBalancerIP` when a load balancer is - created. This field is ignored if the - cloud provider does not support the - feature.This field can be used only with - `loadbalancer` type listener. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations that will be added to the - `Ingress`, `Route`, or `Service` resource. 
- You can use this field to configure DNS - providers such as External DNS. This field - can be used only with `loadbalancer`, - `nodeport`, `route`, or `ingress` type - listeners. - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels that will be added to the - `Ingress`, `Route`, or `Service` resource. - This field can be used only with - `loadbalancer`, `nodeport`, `route`, or - `ingress` type listeners. - description: Bootstrap configuration. - brokers: - type: array - items: - type: object - properties: - broker: - type: integer - description: >- - ID of the kafka broker (broker - identifier). Broker IDs start from 0 and - correspond to the number of broker - replicas. - advertisedHost: - type: string - description: >- - The host name which will be used in the - brokers' `advertised.brokers`. - advertisedPort: - type: integer - description: >- - The port number which will be used in - the brokers' `advertised.brokers`. - host: - type: string - description: >- - The broker host. This field will be used - in the Ingress resource or in the Route - resource to specify the desired - hostname. This field can be used only - with `route` (optional) or `ingress` - (required) type listeners. - nodePort: - type: integer - description: >- - Node port for the per-broker service. - This field can be used only with - `nodeport` type listener. - loadBalancerIP: - type: string - description: >- - The loadbalancer is requested with the - IP address specified in this field. This - feature depends on whether the - underlying cloud provider supports - specifying the `loadBalancerIP` when a - load balancer is created. This field is - ignored if the cloud provider does not - support the feature.This field can be - used only with `loadbalancer` type - listener. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations that will be added to the - `Ingress` or `Service` resource. 
You can - use this field to configure DNS - providers such as External DNS. This - field can be used only with - `loadbalancer`, `nodeport`, or `ingress` - type listeners. - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels that will be added to the - `Ingress`, `Route`, or `Service` - resource. This field can be used only - with `loadbalancer`, `nodeport`, - `route`, or `ingress` type listeners. - required: - - broker - description: Per-broker configurations. - ipFamilyPolicy: - type: string - enum: - - SingleStack - - PreferDualStack - - RequireDualStack - description: >- - Specifies the IP Family Policy used by the - service. Available options are `SingleStack`, - `PreferDualStack` and `RequireDualStack`. - `SingleStack` is for a single IP family. - `PreferDualStack` is for two IP families on - dual-stack configured clusters or a single IP - family on single-stack clusters. - `RequireDualStack` fails unless there are two - IP families on dual-stack configured clusters. - If unspecified, Kubernetes will choose the - default value based on the service type. - Available on Kubernetes 1.20 and newer. - ipFamilies: - type: array - items: - type: string - enum: - - IPv4 - - IPv6 - description: >- - Specifies the IP Families used by the service. - Available options are `IPv4` and `IPv6. If - unspecified, Kubernetes will choose the - default value based on the `ipFamilyPolicy` - setting. Available on Kubernetes 1.20 and - newer. - class: - type: string - description: >- - Configures the `Ingress` class that defines - which `Ingress` controller will be used. This - field can be used only with `ingress` type - listener. If not specified, the default - Ingress controller will be used. - finalizers: - type: array - items: - type: string - description: >- - A list of finalizers which will be configured - for the `LoadBalancer` type Services created - for this listener. 
If supported by the - platform, the finalizer - `service.kubernetes.io/load-balancer-cleanup` - to make sure that the external load balancer - is deleted together with the service.For more - information, see - https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#garbage-collecting-load-balancers. - This field can be used only with - `loadbalancer` type listeners. - maxConnectionCreationRate: - type: integer - description: >- - The maximum connection creation rate we allow - in this listener at any time. New connections - will be throttled if the limit is - reached.Supported only on Kafka 2.7.0 and - newer. - maxConnections: - type: integer - description: >- - The maximum number of connections we allow for - this listener in the broker at any time. New - connections are blocked if the limit is - reached. - preferredNodePortAddressType: - type: string - enum: - - ExternalIP - - ExternalDNS - - InternalIP - - InternalDNS - - Hostname - description: >- - Defines which address type should be used as - the node address. Available types are: - `ExternalDNS`, `ExternalIP`, `InternalDNS`, - `InternalIP` and `Hostname`. By default, the - addresses will be used in the following order - (the first one found will be used): - - * `ExternalDNS` - - * `ExternalIP` - - * `InternalDNS` - - * `InternalIP` - - * `Hostname` - - - This field is used to select the preferred - address type, which is checked first. If no - address is found for this address type, the - other types are checked in the default order. - This field can only be used with `nodeport` - type listener. - useServiceDnsDomain: - type: boolean - description: >- - Configures whether the Kubernetes service DNS - domain should be used or not. If set to - `true`, the generated addresses will contain - the service DNS domain suffix (by default - `.cluster.local`, can be configured using - environment variable - `KUBERNETES_SERVICE_DNS_DOMAIN`). 
Defaults to - `false`.This field can be used only with - `internal` type listener. - description: Additional listener configuration. - networkPolicyPeers: - type: array - items: - type: object - properties: - ipBlock: - type: object - properties: - cidr: - type: string - except: - type: array - items: - type: string - namespaceSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - podSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - List of peers which should be able to connect to - this listener. Peers in this list are combined - using a logical OR operation. If this field is - empty or missing, all connections will be allowed - for this listener. If this field is present and - contains at least one item, the listener only - allows the traffic which matches at least one item - in this list. 
- required: - - name - - port - - type - - tls - config: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Kafka broker config properties with the following - prefixes cannot be set: listeners, advertised., broker., - listener., host.name, port, inter.broker.listener.name, - sasl., ssl., security., password., - principal.builder.class, log.dir, zookeeper.connect, - zookeeper.set.acl, zookeeper.ssl, - zookeeper.clientCnxnSocket, authorizer., super.user, - cruise.control.metrics.topic, - cruise.control.metrics.reporter.bootstrap.servers (with - the exception of: zookeeper.connection.timeout.ms, - ssl.cipher.suites, ssl.protocol, - ssl.enabled.protocols,cruise.control.metrics.topic.num.partitions, - cruise.control.metrics.topic.replication.factor, - cruise.control.metrics.topic.retention.ms,cruise.control.metrics.topic.auto.create.retries, - cruise.control.metrics.topic.auto.create.timeout.ms,cruise.control.metrics.topic.min.insync.replicas). - storage: - type: object - properties: - class: - type: string - description: >- - The storage class to use for dynamic volume - allocation. - deleteClaim: - type: boolean - description: >- - Specifies if the persistent volume claim has to be - deleted when the cluster is un-deployed. - id: - type: integer - minimum: 0 - description: >- - Storage identification number. It is mandatory only - for storage volumes defined in a storage of type - 'jbod'. - overrides: - type: array - items: - type: object - properties: - class: - type: string - description: >- - The storage class to use for dynamic volume - allocation for this broker. - broker: - type: integer - description: Id of the kafka broker (broker identifier). - description: >- - Overrides for individual brokers. The `overrides` - field allows to specify a different configuration - for different brokers. - selector: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Specifies a specific persistent volume to use. 
It - contains key:value pairs representing labels for - selecting such a volume. - size: - type: string - description: >- - When type=persistent-claim, defines the size of the - persistent volume claim (i.e 1Gi). Mandatory when - type=persistent-claim. - sizeLimit: - type: string - pattern: '^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$' - description: >- - When type=ephemeral, defines the total amount of - local storage required for this EmptyDir volume (for - example 1Gi). - type: - type: string - enum: - - ephemeral - - persistent-claim - - jbod - description: >- - Storage type, must be either 'ephemeral', - 'persistent-claim', or 'jbod'. - volumes: - type: array - items: - type: object - properties: - class: - type: string - description: >- - The storage class to use for dynamic volume - allocation. - deleteClaim: - type: boolean - description: >- - Specifies if the persistent volume claim has - to be deleted when the cluster is un-deployed. - id: - type: integer - minimum: 0 - description: >- - Storage identification number. It is mandatory - only for storage volumes defined in a storage - of type 'jbod'. - overrides: - type: array - items: - type: object - properties: - class: - type: string - description: >- - The storage class to use for dynamic - volume allocation for this broker. - broker: - type: integer - description: >- - Id of the kafka broker (broker - identifier). - description: >- - Overrides for individual brokers. The - `overrides` field allows to specify a - different configuration for different brokers. - selector: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Specifies a specific persistent volume to use. - It contains key:value pairs representing - labels for selecting such a volume. - size: - type: string - description: >- - When type=persistent-claim, defines the size - of the persistent volume claim (i.e 1Gi). - Mandatory when type=persistent-claim. 
- sizeLimit: - type: string - pattern: '^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$' - description: >- - When type=ephemeral, defines the total amount - of local storage required for this EmptyDir - volume (for example 1Gi). - type: - type: string - enum: - - ephemeral - - persistent-claim - description: >- - Storage type, must be either 'ephemeral' or - 'persistent-claim'. - required: - - type - description: >- - List of volumes as Storage objects representing the - JBOD disks array. - required: - - type - description: Storage configuration (disk). Cannot be updated. - authorization: - type: object - properties: - allowOnError: - type: boolean - description: >- - Defines whether a Kafka client should be allowed or - denied by default when the authorizer fails to query - the Open Policy Agent, for example, when it is - temporarily unavailable). Defaults to `false` - all - actions will be denied. - authorizerClass: - type: string - description: >- - Authorization implementation class, which must be - available in classpath. - clientId: - type: string - description: >- - OAuth Client ID which the Kafka client can use to - authenticate against the OAuth server and use the - token endpoint URI. - delegateToKafkaAcls: - type: boolean - description: >- - Whether authorization decision should be delegated - to the 'Simple' authorizer if DENIED by Keycloak - Authorization Services policies. Default value is - `false`. - disableTlsHostnameVerification: - type: boolean - description: >- - Enable or disable TLS hostname verification. Default - value is `false`. - expireAfterMs: - type: integer - description: >- - The expiration of the records kept in the local - cache to avoid querying the Open Policy Agent for - every request. Defines how often the cached - authorization decisions are reloaded from the Open - Policy Agent server. In milliseconds. Defaults to - `3600000`. 
- grantsRefreshPeriodSeconds: - type: integer - minimum: 0 - description: >- - The time between two consecutive grants refresh runs - in seconds. The default value is 60. - grantsRefreshPoolSize: - type: integer - minimum: 1 - description: >- - The number of threads to use to refresh grants for - active sessions. The more threads, the more - parallelism, so the sooner the job completes. - However, using more threads places a heavier load on - the authorization server. The default value is 5. - initialCacheCapacity: - type: integer - description: >- - Initial capacity of the local cache used by the - authorizer to avoid querying the Open Policy Agent - for every request Defaults to `5000`. - maximumCacheSize: - type: integer - description: >- - Maximum capacity of the local cache used by the - authorizer to avoid querying the Open Policy Agent - for every request. Defaults to `50000`. - superUsers: - type: array - items: - type: string - description: >- - List of super users, which are user principals with - unlimited access rights. - tlsTrustedCertificates: - type: array - items: - type: object - properties: - certificate: - type: string - description: >- - The name of the file certificate in the - Secret. - secretName: - type: string - description: >- - The name of the Secret containing the - certificate. - required: - - certificate - - secretName - description: >- - Trusted certificates for TLS connection to the OAuth - server. - tokenEndpointUri: - type: string - description: Authorization server token endpoint URI. - type: - type: string - enum: - - simple - - opa - - keycloak - - custom - description: >- - Authorization type. Currently, the supported types - are `simple`, `keycloak`, `opa` and `custom`. - `simple` authorization type uses Kafka's - `kafka.security.authorizer.AclAuthorizer` class for - authorization. `keycloak` authorization type uses - Keycloak Authorization Services for authorization. 
- `opa` authorization type uses Open Policy Agent - based authorization.`custom` authorization type uses - user-provided implementation for authorization. - url: - type: string - example: 'http://opa:8181/v1/data/kafka/authz/allow' - description: >- - The URL used to connect to the Open Policy Agent - server. The URL has to include the policy which will - be queried by the authorizer. This option is - required. - required: - - type - description: Authorization configuration for Kafka brokers. - rack: - type: object - properties: - topologyKey: - type: string - example: topology.kubernetes.io/zone - description: >- - A key that matches labels assigned to the Kubernetes - cluster nodes. The value of the label is used to set - the broker's `broker.rack` config and `client.rack` - in Kafka Connect. - required: - - topologyKey - description: Configuration of the `broker.rack` broker config. - brokerRackInitImage: - type: string - description: >- - The image of the init container used for initializing - the `broker.rack`. - livenessProbe: - type: object - properties: - failureThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive failures for the probe to be - considered failed after having succeeded. Defaults - to 3. Minimum value is 1. - initialDelaySeconds: - type: integer - minimum: 0 - description: >- - The initial delay before first the health is first - checked. Default to 15 seconds. Minimum value is 0. - periodSeconds: - type: integer - minimum: 1 - description: >- - How often (in seconds) to perform the probe. Default - to 10 seconds. Minimum value is 1. - successThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive successes for the probe to be - considered successful after having failed. Defaults - to 1. Must be 1 for liveness. Minimum value is 1. - timeoutSeconds: - type: integer - minimum: 1 - description: >- - The timeout for each attempted health check. Default - to 5 seconds. Minimum value is 1. 
- description: Pod liveness checking. - readinessProbe: - type: object - properties: - failureThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive failures for the probe to be - considered failed after having succeeded. Defaults - to 3. Minimum value is 1. - initialDelaySeconds: - type: integer - minimum: 0 - description: >- - The initial delay before first the health is first - checked. Default to 15 seconds. Minimum value is 0. - periodSeconds: - type: integer - minimum: 1 - description: >- - How often (in seconds) to perform the probe. Default - to 10 seconds. Minimum value is 1. - successThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive successes for the probe to be - considered successful after having failed. Defaults - to 1. Must be 1 for liveness. Minimum value is 1. - timeoutSeconds: - type: integer - minimum: 1 - description: >- - The timeout for each attempted health check. Default - to 5 seconds. Minimum value is 1. - description: Pod readiness checking. - jvmOptions: - type: object - properties: - '-XX': - x-kubernetes-preserve-unknown-fields: true - type: object - description: A map of -XX options to the JVM. - '-Xms': - type: string - pattern: '[0-9]+[mMgG]?' - description: '-Xms option to to the JVM.' - '-Xmx': - type: string - pattern: '[0-9]+[mMgG]?' - description: '-Xmx option to to the JVM.' - gcLoggingEnabled: - type: boolean - description: >- - Specifies whether the Garbage Collection logging is - enabled. The default is false. - javaSystemProperties: - type: array - items: - type: object - properties: - name: - type: string - description: The system property name. - value: - type: string - description: The system property value. - description: >- - A map of additional system properties which will be - passed using the `-D` option to the JVM. - description: JVM Options for pods. 
- jmxOptions: - type: object - properties: - authentication: - type: object - properties: - type: - type: string - enum: - - password - description: >- - Authentication type. Currently the only - supported types are `password`.`password` type - creates a username and protected port with no - TLS. - required: - - type - description: >- - Authentication configuration for connecting to the - JMX port. - description: JMX Options for Kafka brokers. - resources: - type: object - properties: - limits: - x-kubernetes-preserve-unknown-fields: true - type: object - requests: - x-kubernetes-preserve-unknown-fields: true - type: object - description: CPU and memory resources to reserve. - metricsConfig: - type: object - properties: - type: - type: string - enum: - - jmxPrometheusExporter - description: >- - Metrics type. Only 'jmxPrometheusExporter' supported - currently. - valueFrom: - type: object - properties: - configMapKeyRef: - type: object - properties: - key: - type: string - name: - type: string - optional: - type: boolean - description: >- - Reference to the key in the ConfigMap containing - the configuration. - description: >- - ConfigMap entry where the Prometheus JMX Exporter - configuration is stored. For details of the - structure of this configuration, see the - {JMXExporter}. - required: - - type - - valueFrom - description: Metrics configuration. - logging: - type: object - properties: - loggers: - x-kubernetes-preserve-unknown-fields: true - type: object - description: A Map from logger name to logger level. - type: - type: string - enum: - - inline - - external - description: 'Logging type, must be either ''inline'' or ''external''.' - valueFrom: - type: object - properties: - configMapKeyRef: - type: object - properties: - key: - type: string - name: - type: string - optional: - type: boolean - description: >- - Reference to the key in the ConfigMap containing - the configuration. 
- description: >- - `ConfigMap` entry where the logging configuration is - stored. - required: - - type - description: Logging configuration for Kafka. - template: - type: object - properties: - statefulset: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. - Can be applied to different resources such - as `StatefulSets`, `Deployments`, `Pods`, - and `Services`. - description: Metadata applied to the resource. - podManagementPolicy: - type: string - enum: - - OrderedReady - - Parallel - description: >- - PodManagementPolicy which will be used for this - StatefulSet. Valid values are `Parallel` and - `OrderedReady`. Defaults to `Parallel`. - description: Template for Kafka `StatefulSet`. - pod: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. - Can be applied to different resources such - as `StatefulSets`, `Deployments`, `Pods`, - and `Services`. - description: Metadata applied to the resource. - imagePullSecrets: - type: array - items: - type: object - properties: - name: - type: string - description: >- - List of references to secrets in the same - namespace to use for pulling any of the images - used by this Pod. 
When the - `STRIMZI_IMAGE_PULL_SECRETS` environment - variable in Cluster Operator and the - `imagePullSecrets` option are specified, only - the `imagePullSecrets` variable is used and the - `STRIMZI_IMAGE_PULL_SECRETS` variable is - ignored. - securityContext: - type: object - properties: - fsGroup: - type: integer - fsGroupChangePolicy: - type: string - runAsGroup: - type: integer - runAsNonRoot: - type: boolean - runAsUser: - type: integer - seLinuxOptions: - type: object - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - seccompProfile: - type: object - properties: - localhostProfile: - type: string - type: - type: string - supplementalGroups: - type: array - items: - type: integer - sysctls: - type: array - items: - type: object - properties: - name: - type: string - value: - type: string - windowsOptions: - type: object - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - description: >- - Configures pod-level security attributes and - common container settings. - terminationGracePeriodSeconds: - type: integer - minimum: 0 - description: >- - The grace period is the duration in seconds - after the processes running in the pod are sent - a termination signal, and the time when the - processes are forcibly halted with a kill - signal. Set this value to longer than the - expected cleanup time for your process. Value - must be a non-negative integer. A zero value - indicates delete immediately. You might need to - increase the grace period for very large Kafka - clusters, so that the Kafka brokers have enough - time to transfer their work to another broker - before they are terminated. Defaults to 30 - seconds. 
- affinity: - type: object - properties: - nodeAffinity: - type: object - properties: - preferredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - preference: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchFields: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - weight: - type: integer - requiredDuringSchedulingIgnoredDuringExecution: - type: object - properties: - nodeSelectorTerms: - type: array - items: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchFields: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - podAffinity: - type: object - properties: - preferredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - podAffinityTerm: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - weight: - type: integer - requiredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - 
matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - podAntiAffinity: - type: object - properties: - preferredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - podAffinityTerm: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - weight: - type: integer - requiredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - description: The pod's affinity rules. - tolerations: - type: array - items: - type: object - properties: - effect: - type: string - key: - type: string - operator: - type: string - tolerationSeconds: - type: integer - value: - type: string - description: The pod's tolerations. - priorityClassName: - type: string - description: >- - The name of the priority class used to assign - priority to the pods. For more information about - priority classes, see {K8sPriorityClass}. - schedulerName: - type: string - description: >- - The name of the scheduler used to dispatch this - `Pod`. If not specified, the default scheduler - will be used. 
- hostAliases: - type: array - items: - type: object - properties: - hostnames: - type: array - items: - type: string - ip: - type: string - description: >- - The pod's HostAliases. HostAliases is an - optional list of hosts and IPs that will be - injected into the Pod's hosts file if specified. - enableServiceLinks: - type: boolean - description: >- - Indicates whether information about services - should be injected into Pod's environment - variables. - topologySpreadConstraints: - type: array - items: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - maxSkew: - type: integer - topologyKey: - type: string - whenUnsatisfiable: - type: string - description: The pod's topology spread constraints. - description: Template for Kafka `Pods`. - bootstrapService: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. - Can be applied to different resources such - as `StatefulSets`, `Deployments`, `Pods`, - and `Services`. - description: Metadata applied to the resource. - ipFamilyPolicy: - type: string - enum: - - SingleStack - - PreferDualStack - - RequireDualStack - description: >- - Specifies the IP Family Policy used by the - service. Available options are `SingleStack`, - `PreferDualStack` and `RequireDualStack`. - `SingleStack` is for a single IP family. 
- `PreferDualStack` is for two IP families on - dual-stack configured clusters or a single IP - family on single-stack clusters. - `RequireDualStack` fails unless there are two IP - families on dual-stack configured clusters. If - unspecified, Kubernetes will choose the default - value based on the service type. Available on - Kubernetes 1.20 and newer. - ipFamilies: - type: array - items: - type: string - enum: - - IPv4 - - IPv6 - description: >- - Specifies the IP Families used by the service. - Available options are `IPv4` and `IPv6. If - unspecified, Kubernetes will choose the default - value based on the `ipFamilyPolicy` setting. - Available on Kubernetes 1.20 and newer. - description: Template for Kafka bootstrap `Service`. - brokersService: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. - Can be applied to different resources such - as `StatefulSets`, `Deployments`, `Pods`, - and `Services`. - description: Metadata applied to the resource. - ipFamilyPolicy: - type: string - enum: - - SingleStack - - PreferDualStack - - RequireDualStack - description: >- - Specifies the IP Family Policy used by the - service. Available options are `SingleStack`, - `PreferDualStack` and `RequireDualStack`. - `SingleStack` is for a single IP family. - `PreferDualStack` is for two IP families on - dual-stack configured clusters or a single IP - family on single-stack clusters. - `RequireDualStack` fails unless there are two IP - families on dual-stack configured clusters. If - unspecified, Kubernetes will choose the default - value based on the service type. 
Available on - Kubernetes 1.20 and newer. - ipFamilies: - type: array - items: - type: string - enum: - - IPv4 - - IPv6 - description: >- - Specifies the IP Families used by the service. - Available options are `IPv4` and `IPv6. If - unspecified, Kubernetes will choose the default - value based on the `ipFamilyPolicy` setting. - Available on Kubernetes 1.20 and newer. - description: Template for Kafka broker `Service`. - externalBootstrapService: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. - Can be applied to different resources such - as `StatefulSets`, `Deployments`, `Pods`, - and `Services`. - description: Metadata applied to the resource. - description: Template for Kafka external bootstrap `Service`. - perPodService: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. - Can be applied to different resources such - as `StatefulSets`, `Deployments`, `Pods`, - and `Services`. - description: Metadata applied to the resource. - description: >- - Template for Kafka per-pod `Services` used for - access from outside of Kubernetes. 
- externalBootstrapRoute: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. - Can be applied to different resources such - as `StatefulSets`, `Deployments`, `Pods`, - and `Services`. - description: Metadata applied to the resource. - description: Template for Kafka external bootstrap `Route`. - perPodRoute: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. - Can be applied to different resources such - as `StatefulSets`, `Deployments`, `Pods`, - and `Services`. - description: Metadata applied to the resource. - description: >- - Template for Kafka per-pod `Routes` used for access - from outside of OpenShift. - externalBootstrapIngress: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. - Can be applied to different resources such - as `StatefulSets`, `Deployments`, `Pods`, - and `Services`. 
- description: Metadata applied to the resource. - description: Template for Kafka external bootstrap `Ingress`. - perPodIngress: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. - Can be applied to different resources such - as `StatefulSets`, `Deployments`, `Pods`, - and `Services`. - description: Metadata applied to the resource. - description: >- - Template for Kafka per-pod `Ingress` used for access - from outside of Kubernetes. - persistentVolumeClaim: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. - Can be applied to different resources such - as `StatefulSets`, `Deployments`, `Pods`, - and `Services`. - description: Metadata applied to the resource. - description: Template for all Kafka `PersistentVolumeClaims`. - podDisruptionBudget: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. 
- Can be applied to different resources such - as `StatefulSets`, `Deployments`, `Pods`, - and `Services`. - description: >- - Metadata to apply to the - `PodDistruptionBugetTemplate` resource. - maxUnavailable: - type: integer - minimum: 0 - description: >- - Maximum number of unavailable pods to allow - automatic Pod eviction. A Pod eviction is - allowed when the `maxUnavailable` number of pods - or fewer are unavailable after the eviction. - Setting this value to 0 prevents all voluntary - evictions, so the pods must be evicted manually. - Defaults to 1. - description: Template for Kafka `PodDisruptionBudget`. - kafkaContainer: - type: object - properties: - env: - type: array - items: - type: object - properties: - name: - type: string - description: The environment variable key. - value: - type: string - description: The environment variable value. - description: >- - Environment variables which should be applied to - the container. - securityContext: - type: object - properties: - allowPrivilegeEscalation: - type: boolean - capabilities: - type: object - properties: - add: - type: array - items: - type: string - drop: - type: array - items: - type: string - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - type: integer - runAsNonRoot: - type: boolean - runAsUser: - type: integer - seLinuxOptions: - type: object - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - seccompProfile: - type: object - properties: - localhostProfile: - type: string - type: - type: string - windowsOptions: - type: object - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - description: Security context for the container. - description: Template for the Kafka broker container. 
- initContainer: - type: object - properties: - env: - type: array - items: - type: object - properties: - name: - type: string - description: The environment variable key. - value: - type: string - description: The environment variable value. - description: >- - Environment variables which should be applied to - the container. - securityContext: - type: object - properties: - allowPrivilegeEscalation: - type: boolean - capabilities: - type: object - properties: - add: - type: array - items: - type: string - drop: - type: array - items: - type: string - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - type: integer - runAsNonRoot: - type: boolean - runAsUser: - type: integer - seLinuxOptions: - type: object - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - seccompProfile: - type: object - properties: - localhostProfile: - type: string - type: - type: string - windowsOptions: - type: object - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - description: Security context for the container. - description: Template for the Kafka init container. - clusterCaCert: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. - Can be applied to different resources such - as `StatefulSets`, `Deployments`, `Pods`, - and `Services`. - description: Metadata applied to the resource. - description: >- - Template for Secret with Kafka Cluster certificate - public key. 
- clusterRoleBinding: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. - Can be applied to different resources such - as `StatefulSets`, `Deployments`, `Pods`, - and `Services`. - description: Metadata applied to the resource. - description: Template for the Kafka ClusterRoleBinding. - description: >- - Template for Kafka cluster resources. The template - allows users to specify how are the `StatefulSet`, - `Pods` and `Services` generated. - required: - - replicas - - listeners - - storage - description: Configuration of the Kafka cluster. - zookeeper: - type: object - properties: - replicas: - type: integer - minimum: 1 - description: The number of pods in the cluster. - image: - type: string - description: The docker image for the pods. - storage: - type: object - properties: - class: - type: string - description: >- - The storage class to use for dynamic volume - allocation. - deleteClaim: - type: boolean - description: >- - Specifies if the persistent volume claim has to be - deleted when the cluster is un-deployed. - id: - type: integer - minimum: 0 - description: >- - Storage identification number. It is mandatory only - for storage volumes defined in a storage of type - 'jbod'. - overrides: - type: array - items: - type: object - properties: - class: - type: string - description: >- - The storage class to use for dynamic volume - allocation for this broker. - broker: - type: integer - description: Id of the kafka broker (broker identifier). - description: >- - Overrides for individual brokers. 
The `overrides` - field allows to specify a different configuration - for different brokers. - selector: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Specifies a specific persistent volume to use. It - contains key:value pairs representing labels for - selecting such a volume. - size: - type: string - description: >- - When type=persistent-claim, defines the size of the - persistent volume claim (i.e 1Gi). Mandatory when - type=persistent-claim. - sizeLimit: - type: string - pattern: '^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$' - description: >- - When type=ephemeral, defines the total amount of - local storage required for this EmptyDir volume (for - example 1Gi). - type: - type: string - enum: - - ephemeral - - persistent-claim - description: >- - Storage type, must be either 'ephemeral' or - 'persistent-claim'. - required: - - type - description: Storage configuration (disk). Cannot be updated. - config: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - The ZooKeeper broker config. Properties with the - following prefixes cannot be set: server., dataDir, - dataLogDir, clientPort, authProvider, quorum.auth, - requireClientAuthScheme, snapshot.trust.empty, - standaloneEnabled, reconfigEnabled, - 4lw.commands.whitelist, secureClientPort, ssl., - serverCnxnFactory, sslQuorum (with the exception of: - ssl.protocol, ssl.quorum.protocol, ssl.enabledProtocols, - ssl.quorum.enabledProtocols, ssl.ciphersuites, - ssl.quorum.ciphersuites, ssl.hostnameVerification, - ssl.quorum.hostnameVerification). - livenessProbe: - type: object - properties: - failureThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive failures for the probe to be - considered failed after having succeeded. Defaults - to 3. Minimum value is 1. - initialDelaySeconds: - type: integer - minimum: 0 - description: >- - The initial delay before first the health is first - checked. Default to 15 seconds. Minimum value is 0. 
- periodSeconds: - type: integer - minimum: 1 - description: >- - How often (in seconds) to perform the probe. Default - to 10 seconds. Minimum value is 1. - successThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive successes for the probe to be - considered successful after having failed. Defaults - to 1. Must be 1 for liveness. Minimum value is 1. - timeoutSeconds: - type: integer - minimum: 1 - description: >- - The timeout for each attempted health check. Default - to 5 seconds. Minimum value is 1. - description: Pod liveness checking. - readinessProbe: - type: object - properties: - failureThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive failures for the probe to be - considered failed after having succeeded. Defaults - to 3. Minimum value is 1. - initialDelaySeconds: - type: integer - minimum: 0 - description: >- - The initial delay before first the health is first - checked. Default to 15 seconds. Minimum value is 0. - periodSeconds: - type: integer - minimum: 1 - description: >- - How often (in seconds) to perform the probe. Default - to 10 seconds. Minimum value is 1. - successThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive successes for the probe to be - considered successful after having failed. Defaults - to 1. Must be 1 for liveness. Minimum value is 1. - timeoutSeconds: - type: integer - minimum: 1 - description: >- - The timeout for each attempted health check. Default - to 5 seconds. Minimum value is 1. - description: Pod readiness checking. - jvmOptions: - type: object - properties: - '-XX': - x-kubernetes-preserve-unknown-fields: true - type: object - description: A map of -XX options to the JVM. - '-Xms': - type: string - pattern: '[0-9]+[mMgG]?' - description: '-Xms option to to the JVM.' - '-Xmx': - type: string - pattern: '[0-9]+[mMgG]?' - description: '-Xmx option to to the JVM.' 
- gcLoggingEnabled: - type: boolean - description: >- - Specifies whether the Garbage Collection logging is - enabled. The default is false. - javaSystemProperties: - type: array - items: - type: object - properties: - name: - type: string - description: The system property name. - value: - type: string - description: The system property value. - description: >- - A map of additional system properties which will be - passed using the `-D` option to the JVM. - description: JVM Options for pods. - resources: - type: object - properties: - limits: - x-kubernetes-preserve-unknown-fields: true - type: object - requests: - x-kubernetes-preserve-unknown-fields: true - type: object - description: CPU and memory resources to reserve. - metricsConfig: - type: object - properties: - type: - type: string - enum: - - jmxPrometheusExporter - description: >- - Metrics type. Only 'jmxPrometheusExporter' supported - currently. - valueFrom: - type: object - properties: - configMapKeyRef: - type: object - properties: - key: - type: string - name: - type: string - optional: - type: boolean - description: >- - Reference to the key in the ConfigMap containing - the configuration. - description: >- - ConfigMap entry where the Prometheus JMX Exporter - configuration is stored. For details of the - structure of this configuration, see the - {JMXExporter}. - required: - - type - - valueFrom - description: Metrics configuration. - logging: - type: object - properties: - loggers: - x-kubernetes-preserve-unknown-fields: true - type: object - description: A Map from logger name to logger level. - type: - type: string - enum: - - inline - - external - description: 'Logging type, must be either ''inline'' or ''external''.' - valueFrom: - type: object - properties: - configMapKeyRef: - type: object - properties: - key: - type: string - name: - type: string - optional: - type: boolean - description: >- - Reference to the key in the ConfigMap containing - the configuration. 
- description: >- - `ConfigMap` entry where the logging configuration is - stored. - required: - - type - description: Logging configuration for ZooKeeper. - template: - type: object - properties: - statefulset: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. - Can be applied to different resources such - as `StatefulSets`, `Deployments`, `Pods`, - and `Services`. - description: Metadata applied to the resource. - podManagementPolicy: - type: string - enum: - - OrderedReady - - Parallel - description: >- - PodManagementPolicy which will be used for this - StatefulSet. Valid values are `Parallel` and - `OrderedReady`. Defaults to `Parallel`. - description: Template for ZooKeeper `StatefulSet`. - pod: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. - Can be applied to different resources such - as `StatefulSets`, `Deployments`, `Pods`, - and `Services`. - description: Metadata applied to the resource. - imagePullSecrets: - type: array - items: - type: object - properties: - name: - type: string - description: >- - List of references to secrets in the same - namespace to use for pulling any of the images - used by this Pod. 
When the - `STRIMZI_IMAGE_PULL_SECRETS` environment - variable in Cluster Operator and the - `imagePullSecrets` option are specified, only - the `imagePullSecrets` variable is used and the - `STRIMZI_IMAGE_PULL_SECRETS` variable is - ignored. - securityContext: - type: object - properties: - fsGroup: - type: integer - fsGroupChangePolicy: - type: string - runAsGroup: - type: integer - runAsNonRoot: - type: boolean - runAsUser: - type: integer - seLinuxOptions: - type: object - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - seccompProfile: - type: object - properties: - localhostProfile: - type: string - type: - type: string - supplementalGroups: - type: array - items: - type: integer - sysctls: - type: array - items: - type: object - properties: - name: - type: string - value: - type: string - windowsOptions: - type: object - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - description: >- - Configures pod-level security attributes and - common container settings. - terminationGracePeriodSeconds: - type: integer - minimum: 0 - description: >- - The grace period is the duration in seconds - after the processes running in the pod are sent - a termination signal, and the time when the - processes are forcibly halted with a kill - signal. Set this value to longer than the - expected cleanup time for your process. Value - must be a non-negative integer. A zero value - indicates delete immediately. You might need to - increase the grace period for very large Kafka - clusters, so that the Kafka brokers have enough - time to transfer their work to another broker - before they are terminated. Defaults to 30 - seconds. 
- affinity: - type: object - properties: - nodeAffinity: - type: object - properties: - preferredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - preference: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchFields: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - weight: - type: integer - requiredDuringSchedulingIgnoredDuringExecution: - type: object - properties: - nodeSelectorTerms: - type: array - items: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchFields: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - podAffinity: - type: object - properties: - preferredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - podAffinityTerm: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - weight: - type: integer - requiredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - 
matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - podAntiAffinity: - type: object - properties: - preferredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - podAffinityTerm: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - weight: - type: integer - requiredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - description: The pod's affinity rules. - tolerations: - type: array - items: - type: object - properties: - effect: - type: string - key: - type: string - operator: - type: string - tolerationSeconds: - type: integer - value: - type: string - description: The pod's tolerations. - priorityClassName: - type: string - description: >- - The name of the priority class used to assign - priority to the pods. For more information about - priority classes, see {K8sPriorityClass}. - schedulerName: - type: string - description: >- - The name of the scheduler used to dispatch this - `Pod`. If not specified, the default scheduler - will be used. 
- hostAliases: - type: array - items: - type: object - properties: - hostnames: - type: array - items: - type: string - ip: - type: string - description: >- - The pod's HostAliases. HostAliases is an - optional list of hosts and IPs that will be - injected into the Pod's hosts file if specified. - enableServiceLinks: - type: boolean - description: >- - Indicates whether information about services - should be injected into Pod's environment - variables. - topologySpreadConstraints: - type: array - items: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - maxSkew: - type: integer - topologyKey: - type: string - whenUnsatisfiable: - type: string - description: The pod's topology spread constraints. - description: Template for ZooKeeper `Pods`. - clientService: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. - Can be applied to different resources such - as `StatefulSets`, `Deployments`, `Pods`, - and `Services`. - description: Metadata applied to the resource. - ipFamilyPolicy: - type: string - enum: - - SingleStack - - PreferDualStack - - RequireDualStack - description: >- - Specifies the IP Family Policy used by the - service. Available options are `SingleStack`, - `PreferDualStack` and `RequireDualStack`. - `SingleStack` is for a single IP family. 
- `PreferDualStack` is for two IP families on - dual-stack configured clusters or a single IP - family on single-stack clusters. - `RequireDualStack` fails unless there are two IP - families on dual-stack configured clusters. If - unspecified, Kubernetes will choose the default - value based on the service type. Available on - Kubernetes 1.20 and newer. - ipFamilies: - type: array - items: - type: string - enum: - - IPv4 - - IPv6 - description: >- - Specifies the IP Families used by the service. - Available options are `IPv4` and `IPv6. If - unspecified, Kubernetes will choose the default - value based on the `ipFamilyPolicy` setting. - Available on Kubernetes 1.20 and newer. - description: Template for ZooKeeper client `Service`. - nodesService: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. - Can be applied to different resources such - as `StatefulSets`, `Deployments`, `Pods`, - and `Services`. - description: Metadata applied to the resource. - ipFamilyPolicy: - type: string - enum: - - SingleStack - - PreferDualStack - - RequireDualStack - description: >- - Specifies the IP Family Policy used by the - service. Available options are `SingleStack`, - `PreferDualStack` and `RequireDualStack`. - `SingleStack` is for a single IP family. - `PreferDualStack` is for two IP families on - dual-stack configured clusters or a single IP - family on single-stack clusters. - `RequireDualStack` fails unless there are two IP - families on dual-stack configured clusters. If - unspecified, Kubernetes will choose the default - value based on the service type. 
Available on - Kubernetes 1.20 and newer. - ipFamilies: - type: array - items: - type: string - enum: - - IPv4 - - IPv6 - description: >- - Specifies the IP Families used by the service. - Available options are `IPv4` and `IPv6. If - unspecified, Kubernetes will choose the default - value based on the `ipFamilyPolicy` setting. - Available on Kubernetes 1.20 and newer. - description: Template for ZooKeeper nodes `Service`. - persistentVolumeClaim: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. - Can be applied to different resources such - as `StatefulSets`, `Deployments`, `Pods`, - and `Services`. - description: Metadata applied to the resource. - description: Template for all ZooKeeper `PersistentVolumeClaims`. - podDisruptionBudget: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. - Can be applied to different resources such - as `StatefulSets`, `Deployments`, `Pods`, - and `Services`. - description: >- - Metadata to apply to the - `PodDistruptionBugetTemplate` resource. - maxUnavailable: - type: integer - minimum: 0 - description: >- - Maximum number of unavailable pods to allow - automatic Pod eviction. 
A Pod eviction is - allowed when the `maxUnavailable` number of pods - or fewer are unavailable after the eviction. - Setting this value to 0 prevents all voluntary - evictions, so the pods must be evicted manually. - Defaults to 1. - description: Template for ZooKeeper `PodDisruptionBudget`. - zookeeperContainer: - type: object - properties: - env: - type: array - items: - type: object - properties: - name: - type: string - description: The environment variable key. - value: - type: string - description: The environment variable value. - description: >- - Environment variables which should be applied to - the container. - securityContext: - type: object - properties: - allowPrivilegeEscalation: - type: boolean - capabilities: - type: object - properties: - add: - type: array - items: - type: string - drop: - type: array - items: - type: string - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - type: integer - runAsNonRoot: - type: boolean - runAsUser: - type: integer - seLinuxOptions: - type: object - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - seccompProfile: - type: object - properties: - localhostProfile: - type: string - type: - type: string - windowsOptions: - type: object - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - description: Security context for the container. - description: Template for the ZooKeeper container. - description: >- - Template for ZooKeeper cluster resources. The template - allows users to specify how are the `StatefulSet`, - `Pods` and `Services` generated. - required: - - replicas - - storage - description: Configuration of the ZooKeeper cluster. - entityOperator: - type: object - properties: - topicOperator: - type: object - properties: - watchedNamespace: - type: string - description: The namespace the Topic Operator should watch. 
- image: - type: string - description: The image to use for the Topic Operator. - reconciliationIntervalSeconds: - type: integer - minimum: 0 - description: Interval between periodic reconciliations. - zookeeperSessionTimeoutSeconds: - type: integer - minimum: 0 - description: Timeout for the ZooKeeper session. - startupProbe: - type: object - properties: - failureThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive failures for the probe to be - considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - initialDelaySeconds: - type: integer - minimum: 0 - description: >- - The initial delay before first the health is - first checked. Default to 15 seconds. Minimum - value is 0. - periodSeconds: - type: integer - minimum: 1 - description: >- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - successThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive successes for the probe to - be considered successful after having failed. - Defaults to 1. Must be 1 for liveness. Minimum - value is 1. - timeoutSeconds: - type: integer - minimum: 1 - description: >- - The timeout for each attempted health check. - Default to 5 seconds. Minimum value is 1. - description: Pod startup checking. - livenessProbe: - type: object - properties: - failureThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive failures for the probe to be - considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - initialDelaySeconds: - type: integer - minimum: 0 - description: >- - The initial delay before first the health is - first checked. Default to 15 seconds. Minimum - value is 0. - periodSeconds: - type: integer - minimum: 1 - description: >- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. 
- successThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive successes for the probe to - be considered successful after having failed. - Defaults to 1. Must be 1 for liveness. Minimum - value is 1. - timeoutSeconds: - type: integer - minimum: 1 - description: >- - The timeout for each attempted health check. - Default to 5 seconds. Minimum value is 1. - description: Pod liveness checking. - readinessProbe: - type: object - properties: - failureThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive failures for the probe to be - considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - initialDelaySeconds: - type: integer - minimum: 0 - description: >- - The initial delay before first the health is - first checked. Default to 15 seconds. Minimum - value is 0. - periodSeconds: - type: integer - minimum: 1 - description: >- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - successThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive successes for the probe to - be considered successful after having failed. - Defaults to 1. Must be 1 for liveness. Minimum - value is 1. - timeoutSeconds: - type: integer - minimum: 1 - description: >- - The timeout for each attempted health check. - Default to 5 seconds. Minimum value is 1. - description: Pod readiness checking. - resources: - type: object - properties: - limits: - x-kubernetes-preserve-unknown-fields: true - type: object - requests: - x-kubernetes-preserve-unknown-fields: true - type: object - description: CPU and memory resources to reserve. - topicMetadataMaxAttempts: - type: integer - minimum: 0 - description: The number of attempts at getting topic metadata. - logging: - type: object - properties: - loggers: - x-kubernetes-preserve-unknown-fields: true - type: object - description: A Map from logger name to logger level. 
- type: - type: string - enum: - - inline - - external - description: >- - Logging type, must be either 'inline' or - 'external'. - valueFrom: - type: object - properties: - configMapKeyRef: - type: object - properties: - key: - type: string - name: - type: string - optional: - type: boolean - description: >- - Reference to the key in the ConfigMap - containing the configuration. - description: >- - `ConfigMap` entry where the logging - configuration is stored. - required: - - type - description: Logging configuration. - jvmOptions: - type: object - properties: - '-XX': - x-kubernetes-preserve-unknown-fields: true - type: object - description: A map of -XX options to the JVM. - '-Xms': - type: string - pattern: '[0-9]+[mMgG]?' - description: '-Xms option to to the JVM.' - '-Xmx': - type: string - pattern: '[0-9]+[mMgG]?' - description: '-Xmx option to to the JVM.' - gcLoggingEnabled: - type: boolean - description: >- - Specifies whether the Garbage Collection logging - is enabled. The default is false. - javaSystemProperties: - type: array - items: - type: object - properties: - name: - type: string - description: The system property name. - value: - type: string - description: The system property value. - description: >- - A map of additional system properties which will - be passed using the `-D` option to the JVM. - description: JVM Options for pods. - description: Configuration of the Topic Operator. - userOperator: - type: object - properties: - watchedNamespace: - type: string - description: The namespace the User Operator should watch. - image: - type: string - description: The image to use for the User Operator. - reconciliationIntervalSeconds: - type: integer - minimum: 0 - description: Interval between periodic reconciliations. - zookeeperSessionTimeoutSeconds: - type: integer - minimum: 0 - description: Timeout for the ZooKeeper session. 
- secretPrefix: - type: string - description: >- - The prefix that will be added to the KafkaUser name - to be used as the Secret name. - livenessProbe: - type: object - properties: - failureThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive failures for the probe to be - considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - initialDelaySeconds: - type: integer - minimum: 0 - description: >- - The initial delay before first the health is - first checked. Default to 15 seconds. Minimum - value is 0. - periodSeconds: - type: integer - minimum: 1 - description: >- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - successThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive successes for the probe to - be considered successful after having failed. - Defaults to 1. Must be 1 for liveness. Minimum - value is 1. - timeoutSeconds: - type: integer - minimum: 1 - description: >- - The timeout for each attempted health check. - Default to 5 seconds. Minimum value is 1. - description: Pod liveness checking. - readinessProbe: - type: object - properties: - failureThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive failures for the probe to be - considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - initialDelaySeconds: - type: integer - minimum: 0 - description: >- - The initial delay before first the health is - first checked. Default to 15 seconds. Minimum - value is 0. - periodSeconds: - type: integer - minimum: 1 - description: >- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - successThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive successes for the probe to - be considered successful after having failed. - Defaults to 1. Must be 1 for liveness. Minimum - value is 1. 
- timeoutSeconds: - type: integer - minimum: 1 - description: >- - The timeout for each attempted health check. - Default to 5 seconds. Minimum value is 1. - description: Pod readiness checking. - resources: - type: object - properties: - limits: - x-kubernetes-preserve-unknown-fields: true - type: object - requests: - x-kubernetes-preserve-unknown-fields: true - type: object - description: CPU and memory resources to reserve. - logging: - type: object - properties: - loggers: - x-kubernetes-preserve-unknown-fields: true - type: object - description: A Map from logger name to logger level. - type: - type: string - enum: - - inline - - external - description: >- - Logging type, must be either 'inline' or - 'external'. - valueFrom: - type: object - properties: - configMapKeyRef: - type: object - properties: - key: - type: string - name: - type: string - optional: - type: boolean - description: >- - Reference to the key in the ConfigMap - containing the configuration. - description: >- - `ConfigMap` entry where the logging - configuration is stored. - required: - - type - description: Logging configuration. - jvmOptions: - type: object - properties: - '-XX': - x-kubernetes-preserve-unknown-fields: true - type: object - description: A map of -XX options to the JVM. - '-Xms': - type: string - pattern: '[0-9]+[mMgG]?' - description: '-Xms option to to the JVM.' - '-Xmx': - type: string - pattern: '[0-9]+[mMgG]?' - description: '-Xmx option to to the JVM.' - gcLoggingEnabled: - type: boolean - description: >- - Specifies whether the Garbage Collection logging - is enabled. The default is false. - javaSystemProperties: - type: array - items: - type: object - properties: - name: - type: string - description: The system property name. - value: - type: string - description: The system property value. - description: >- - A map of additional system properties which will - be passed using the `-D` option to the JVM. - description: JVM Options for pods. 
- description: Configuration of the User Operator. - tlsSidecar: - type: object - properties: - image: - type: string - description: The docker image for the container. - livenessProbe: - type: object - properties: - failureThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive failures for the probe to be - considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - initialDelaySeconds: - type: integer - minimum: 0 - description: >- - The initial delay before first the health is - first checked. Default to 15 seconds. Minimum - value is 0. - periodSeconds: - type: integer - minimum: 1 - description: >- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - successThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive successes for the probe to - be considered successful after having failed. - Defaults to 1. Must be 1 for liveness. Minimum - value is 1. - timeoutSeconds: - type: integer - minimum: 1 - description: >- - The timeout for each attempted health check. - Default to 5 seconds. Minimum value is 1. - description: Pod liveness checking. - logLevel: - type: string - enum: - - emerg - - alert - - crit - - err - - warning - - notice - - info - - debug - description: >- - The log level for the TLS sidecar. Default value is - `notice`. - readinessProbe: - type: object - properties: - failureThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive failures for the probe to be - considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - initialDelaySeconds: - type: integer - minimum: 0 - description: >- - The initial delay before first the health is - first checked. Default to 15 seconds. Minimum - value is 0. - periodSeconds: - type: integer - minimum: 1 - description: >- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. 
- successThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive successes for the probe to - be considered successful after having failed. - Defaults to 1. Must be 1 for liveness. Minimum - value is 1. - timeoutSeconds: - type: integer - minimum: 1 - description: >- - The timeout for each attempted health check. - Default to 5 seconds. Minimum value is 1. - description: Pod readiness checking. - resources: - type: object - properties: - limits: - x-kubernetes-preserve-unknown-fields: true - type: object - requests: - x-kubernetes-preserve-unknown-fields: true - type: object - description: CPU and memory resources to reserve. - description: TLS sidecar configuration. - template: - type: object - properties: - deployment: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. - Can be applied to different resources such - as `StatefulSets`, `Deployments`, `Pods`, - and `Services`. - description: Metadata applied to the resource. - description: Template for Entity Operator `Deployment`. - pod: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. - Can be applied to different resources such - as `StatefulSets`, `Deployments`, `Pods`, - and `Services`. 
- description: Metadata applied to the resource. - imagePullSecrets: - type: array - items: - type: object - properties: - name: - type: string - description: >- - List of references to secrets in the same - namespace to use for pulling any of the images - used by this Pod. When the - `STRIMZI_IMAGE_PULL_SECRETS` environment - variable in Cluster Operator and the - `imagePullSecrets` option are specified, only - the `imagePullSecrets` variable is used and the - `STRIMZI_IMAGE_PULL_SECRETS` variable is - ignored. - securityContext: - type: object - properties: - fsGroup: - type: integer - fsGroupChangePolicy: - type: string - runAsGroup: - type: integer - runAsNonRoot: - type: boolean - runAsUser: - type: integer - seLinuxOptions: - type: object - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - seccompProfile: - type: object - properties: - localhostProfile: - type: string - type: - type: string - supplementalGroups: - type: array - items: - type: integer - sysctls: - type: array - items: - type: object - properties: - name: - type: string - value: - type: string - windowsOptions: - type: object - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - description: >- - Configures pod-level security attributes and - common container settings. - terminationGracePeriodSeconds: - type: integer - minimum: 0 - description: >- - The grace period is the duration in seconds - after the processes running in the pod are sent - a termination signal, and the time when the - processes are forcibly halted with a kill - signal. Set this value to longer than the - expected cleanup time for your process. Value - must be a non-negative integer. A zero value - indicates delete immediately. 
You might need to - increase the grace period for very large Kafka - clusters, so that the Kafka brokers have enough - time to transfer their work to another broker - before they are terminated. Defaults to 30 - seconds. - affinity: - type: object - properties: - nodeAffinity: - type: object - properties: - preferredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - preference: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchFields: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - weight: - type: integer - requiredDuringSchedulingIgnoredDuringExecution: - type: object - properties: - nodeSelectorTerms: - type: array - items: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchFields: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - podAffinity: - type: object - properties: - preferredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - podAffinityTerm: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - weight: - type: integer - requiredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - 
labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - podAntiAffinity: - type: object - properties: - preferredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - podAffinityTerm: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - weight: - type: integer - requiredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - description: The pod's affinity rules. - tolerations: - type: array - items: - type: object - properties: - effect: - type: string - key: - type: string - operator: - type: string - tolerationSeconds: - type: integer - value: - type: string - description: The pod's tolerations. - priorityClassName: - type: string - description: >- - The name of the priority class used to assign - priority to the pods. For more information about - priority classes, see {K8sPriorityClass}. 
- schedulerName: - type: string - description: >- - The name of the scheduler used to dispatch this - `Pod`. If not specified, the default scheduler - will be used. - hostAliases: - type: array - items: - type: object - properties: - hostnames: - type: array - items: - type: string - ip: - type: string - description: >- - The pod's HostAliases. HostAliases is an - optional list of hosts and IPs that will be - injected into the Pod's hosts file if specified. - enableServiceLinks: - type: boolean - description: >- - Indicates whether information about services - should be injected into Pod's environment - variables. - topologySpreadConstraints: - type: array - items: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - maxSkew: - type: integer - topologyKey: - type: string - whenUnsatisfiable: - type: string - description: The pod's topology spread constraints. - description: Template for Entity Operator `Pods`. - tlsSidecarContainer: - type: object - properties: - env: - type: array - items: - type: object - properties: - name: - type: string - description: The environment variable key. - value: - type: string - description: The environment variable value. - description: >- - Environment variables which should be applied to - the container. 
- securityContext: - type: object - properties: - allowPrivilegeEscalation: - type: boolean - capabilities: - type: object - properties: - add: - type: array - items: - type: string - drop: - type: array - items: - type: string - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - type: integer - runAsNonRoot: - type: boolean - runAsUser: - type: integer - seLinuxOptions: - type: object - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - seccompProfile: - type: object - properties: - localhostProfile: - type: string - type: - type: string - windowsOptions: - type: object - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - description: Security context for the container. - description: >- - Template for the Entity Operator TLS sidecar - container. - topicOperatorContainer: - type: object - properties: - env: - type: array - items: - type: object - properties: - name: - type: string - description: The environment variable key. - value: - type: string - description: The environment variable value. - description: >- - Environment variables which should be applied to - the container. 
- securityContext: - type: object - properties: - allowPrivilegeEscalation: - type: boolean - capabilities: - type: object - properties: - add: - type: array - items: - type: string - drop: - type: array - items: - type: string - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - type: integer - runAsNonRoot: - type: boolean - runAsUser: - type: integer - seLinuxOptions: - type: object - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - seccompProfile: - type: object - properties: - localhostProfile: - type: string - type: - type: string - windowsOptions: - type: object - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - description: Security context for the container. - description: Template for the Entity Topic Operator container. - userOperatorContainer: - type: object - properties: - env: - type: array - items: - type: object - properties: - name: - type: string - description: The environment variable key. - value: - type: string - description: The environment variable value. - description: >- - Environment variables which should be applied to - the container. 
- securityContext: - type: object - properties: - allowPrivilegeEscalation: - type: boolean - capabilities: - type: object - properties: - add: - type: array - items: - type: string - drop: - type: array - items: - type: string - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - type: integer - runAsNonRoot: - type: boolean - runAsUser: - type: integer - seLinuxOptions: - type: object - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - seccompProfile: - type: object - properties: - localhostProfile: - type: string - type: - type: string - windowsOptions: - type: object - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - description: Security context for the container. - description: Template for the Entity User Operator container. - description: >- - Template for Entity Operator resources. The template - allows users to specify how is the `Deployment` and - `Pods` generated. - description: Configuration of the Entity Operator. - clusterCa: - type: object - properties: - generateCertificateAuthority: - type: boolean - description: >- - If true then Certificate Authority certificates will be - generated automatically. Otherwise the user will need to - provide a Secret with the CA certificate. Default is - true. - generateSecretOwnerReference: - type: boolean - description: >- - If `true`, the Cluster and Client CA Secrets are - configured with the `ownerReference` set to the `Kafka` - resource. If the `Kafka` resource is deleted when - `true`, the CA Secrets are also deleted. If `false`, the - `ownerReference` is disabled. If the `Kafka` resource is - deleted when `false`, the CA Secrets are retained and - available for reuse. Default is `true`. - validityDays: - type: integer - minimum: 1 - description: >- - The number of days generated certificates should be - valid for. 
The default is 365. - renewalDays: - type: integer - minimum: 1 - description: >- - The number of days in the certificate renewal period. - This is the number of days before the a certificate - expires during which renewal actions may be performed. - When `generateCertificateAuthority` is true, this will - cause the generation of a new certificate. When - `generateCertificateAuthority` is true, this will cause - extra logging at WARN level about the pending - certificate expiry. Default is 30. - certificateExpirationPolicy: - type: string - enum: - - renew-certificate - - replace-key - description: >- - How should CA certificate expiration be handled when - `generateCertificateAuthority=true`. The default is for - a new CA certificate to be generated reusing the - existing private key. - description: Configuration of the cluster certificate authority. - clientsCa: - type: object - properties: - generateCertificateAuthority: - type: boolean - description: >- - If true then Certificate Authority certificates will be - generated automatically. Otherwise the user will need to - provide a Secret with the CA certificate. Default is - true. - generateSecretOwnerReference: - type: boolean - description: >- - If `true`, the Cluster and Client CA Secrets are - configured with the `ownerReference` set to the `Kafka` - resource. If the `Kafka` resource is deleted when - `true`, the CA Secrets are also deleted. If `false`, the - `ownerReference` is disabled. If the `Kafka` resource is - deleted when `false`, the CA Secrets are retained and - available for reuse. Default is `true`. - validityDays: - type: integer - minimum: 1 - description: >- - The number of days generated certificates should be - valid for. The default is 365. - renewalDays: - type: integer - minimum: 1 - description: >- - The number of days in the certificate renewal period. - This is the number of days before the a certificate - expires during which renewal actions may be performed. 
- When `generateCertificateAuthority` is true, this will - cause the generation of a new certificate. When - `generateCertificateAuthority` is true, this will cause - extra logging at WARN level about the pending - certificate expiry. Default is 30. - certificateExpirationPolicy: - type: string - enum: - - renew-certificate - - replace-key - description: >- - How should CA certificate expiration be handled when - `generateCertificateAuthority=true`. The default is for - a new CA certificate to be generated reusing the - existing private key. - description: Configuration of the clients certificate authority. - cruiseControl: - type: object - properties: - image: - type: string - description: The docker image for the pods. - tlsSidecar: - type: object - properties: - image: - type: string - description: The docker image for the container. - livenessProbe: - type: object - properties: - failureThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive failures for the probe to be - considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - initialDelaySeconds: - type: integer - minimum: 0 - description: >- - The initial delay before first the health is - first checked. Default to 15 seconds. Minimum - value is 0. - periodSeconds: - type: integer - minimum: 1 - description: >- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - successThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive successes for the probe to - be considered successful after having failed. - Defaults to 1. Must be 1 for liveness. Minimum - value is 1. - timeoutSeconds: - type: integer - minimum: 1 - description: >- - The timeout for each attempted health check. - Default to 5 seconds. Minimum value is 1. - description: Pod liveness checking. 
- logLevel: - type: string - enum: - - emerg - - alert - - crit - - err - - warning - - notice - - info - - debug - description: >- - The log level for the TLS sidecar. Default value is - `notice`. - readinessProbe: - type: object - properties: - failureThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive failures for the probe to be - considered failed after having succeeded. - Defaults to 3. Minimum value is 1. - initialDelaySeconds: - type: integer - minimum: 0 - description: >- - The initial delay before first the health is - first checked. Default to 15 seconds. Minimum - value is 0. - periodSeconds: - type: integer - minimum: 1 - description: >- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. - successThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive successes for the probe to - be considered successful after having failed. - Defaults to 1. Must be 1 for liveness. Minimum - value is 1. - timeoutSeconds: - type: integer - minimum: 1 - description: >- - The timeout for each attempted health check. - Default to 5 seconds. Minimum value is 1. - description: Pod readiness checking. - resources: - type: object - properties: - limits: - x-kubernetes-preserve-unknown-fields: true - type: object - requests: - x-kubernetes-preserve-unknown-fields: true - type: object - description: CPU and memory resources to reserve. - description: TLS sidecar configuration. - resources: - type: object - properties: - limits: - x-kubernetes-preserve-unknown-fields: true - type: object - requests: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - CPU and memory resources to reserve for the Cruise - Control container. - livenessProbe: - type: object - properties: - failureThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive failures for the probe to be - considered failed after having succeeded. Defaults - to 3. Minimum value is 1. 
- initialDelaySeconds: - type: integer - minimum: 0 - description: >- - The initial delay before first the health is first - checked. Default to 15 seconds. Minimum value is 0. - periodSeconds: - type: integer - minimum: 1 - description: >- - How often (in seconds) to perform the probe. Default - to 10 seconds. Minimum value is 1. - successThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive successes for the probe to be - considered successful after having failed. Defaults - to 1. Must be 1 for liveness. Minimum value is 1. - timeoutSeconds: - type: integer - minimum: 1 - description: >- - The timeout for each attempted health check. Default - to 5 seconds. Minimum value is 1. - description: Pod liveness checking for the Cruise Control container. - readinessProbe: - type: object - properties: - failureThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive failures for the probe to be - considered failed after having succeeded. Defaults - to 3. Minimum value is 1. - initialDelaySeconds: - type: integer - minimum: 0 - description: >- - The initial delay before first the health is first - checked. Default to 15 seconds. Minimum value is 0. - periodSeconds: - type: integer - minimum: 1 - description: >- - How often (in seconds) to perform the probe. Default - to 10 seconds. Minimum value is 1. - successThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive successes for the probe to be - considered successful after having failed. Defaults - to 1. Must be 1 for liveness. Minimum value is 1. - timeoutSeconds: - type: integer - minimum: 1 - description: >- - The timeout for each attempted health check. Default - to 5 seconds. Minimum value is 1. - description: Pod readiness checking for the Cruise Control container. - jvmOptions: - type: object - properties: - '-XX': - x-kubernetes-preserve-unknown-fields: true - type: object - description: A map of -XX options to the JVM. 
- '-Xms': - type: string - pattern: '[0-9]+[mMgG]?' - description: '-Xms option to to the JVM.' - '-Xmx': - type: string - pattern: '[0-9]+[mMgG]?' - description: '-Xmx option to to the JVM.' - gcLoggingEnabled: - type: boolean - description: >- - Specifies whether the Garbage Collection logging is - enabled. The default is false. - javaSystemProperties: - type: array - items: - type: object - properties: - name: - type: string - description: The system property name. - value: - type: string - description: The system property value. - description: >- - A map of additional system properties which will be - passed using the `-D` option to the JVM. - description: JVM Options for the Cruise Control container. - logging: - type: object - properties: - loggers: - x-kubernetes-preserve-unknown-fields: true - type: object - description: A Map from logger name to logger level. - type: - type: string - enum: - - inline - - external - description: 'Logging type, must be either ''inline'' or ''external''.' - valueFrom: - type: object - properties: - configMapKeyRef: - type: object - properties: - key: - type: string - name: - type: string - optional: - type: boolean - description: >- - Reference to the key in the ConfigMap containing - the configuration. - description: >- - `ConfigMap` entry where the logging configuration is - stored. - required: - - type - description: Logging configuration (Log4j 2) for Cruise Control. - template: - type: object - properties: - deployment: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. 
- Can be applied to different resources such - as `StatefulSets`, `Deployments`, `Pods`, - and `Services`. - description: Metadata applied to the resource. - description: Template for Cruise Control `Deployment`. - pod: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. - Can be applied to different resources such - as `StatefulSets`, `Deployments`, `Pods`, - and `Services`. - description: Metadata applied to the resource. - imagePullSecrets: - type: array - items: - type: object - properties: - name: - type: string - description: >- - List of references to secrets in the same - namespace to use for pulling any of the images - used by this Pod. When the - `STRIMZI_IMAGE_PULL_SECRETS` environment - variable in Cluster Operator and the - `imagePullSecrets` option are specified, only - the `imagePullSecrets` variable is used and the - `STRIMZI_IMAGE_PULL_SECRETS` variable is - ignored. 
- securityContext: - type: object - properties: - fsGroup: - type: integer - fsGroupChangePolicy: - type: string - runAsGroup: - type: integer - runAsNonRoot: - type: boolean - runAsUser: - type: integer - seLinuxOptions: - type: object - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - seccompProfile: - type: object - properties: - localhostProfile: - type: string - type: - type: string - supplementalGroups: - type: array - items: - type: integer - sysctls: - type: array - items: - type: object - properties: - name: - type: string - value: - type: string - windowsOptions: - type: object - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - description: >- - Configures pod-level security attributes and - common container settings. - terminationGracePeriodSeconds: - type: integer - minimum: 0 - description: >- - The grace period is the duration in seconds - after the processes running in the pod are sent - a termination signal, and the time when the - processes are forcibly halted with a kill - signal. Set this value to longer than the - expected cleanup time for your process. Value - must be a non-negative integer. A zero value - indicates delete immediately. You might need to - increase the grace period for very large Kafka - clusters, so that the Kafka brokers have enough - time to transfer their work to another broker - before they are terminated. Defaults to 30 - seconds. 
- affinity: - type: object - properties: - nodeAffinity: - type: object - properties: - preferredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - preference: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchFields: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - weight: - type: integer - requiredDuringSchedulingIgnoredDuringExecution: - type: object - properties: - nodeSelectorTerms: - type: array - items: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchFields: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - podAffinity: - type: object - properties: - preferredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - podAffinityTerm: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - weight: - type: integer - requiredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - 
matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - podAntiAffinity: - type: object - properties: - preferredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - podAffinityTerm: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - weight: - type: integer - requiredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - description: The pod's affinity rules. - tolerations: - type: array - items: - type: object - properties: - effect: - type: string - key: - type: string - operator: - type: string - tolerationSeconds: - type: integer - value: - type: string - description: The pod's tolerations. - priorityClassName: - type: string - description: >- - The name of the priority class used to assign - priority to the pods. For more information about - priority classes, see {K8sPriorityClass}. - schedulerName: - type: string - description: >- - The name of the scheduler used to dispatch this - `Pod`. If not specified, the default scheduler - will be used. 
- hostAliases: - type: array - items: - type: object - properties: - hostnames: - type: array - items: - type: string - ip: - type: string - description: >- - The pod's HostAliases. HostAliases is an - optional list of hosts and IPs that will be - injected into the Pod's hosts file if specified. - enableServiceLinks: - type: boolean - description: >- - Indicates whether information about services - should be injected into Pod's environment - variables. - topologySpreadConstraints: - type: array - items: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - maxSkew: - type: integer - topologyKey: - type: string - whenUnsatisfiable: - type: string - description: The pod's topology spread constraints. - description: Template for Cruise Control `Pods`. - apiService: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. - Can be applied to different resources such - as `StatefulSets`, `Deployments`, `Pods`, - and `Services`. - description: Metadata applied to the resource. - ipFamilyPolicy: - type: string - enum: - - SingleStack - - PreferDualStack - - RequireDualStack - description: >- - Specifies the IP Family Policy used by the - service. Available options are `SingleStack`, - `PreferDualStack` and `RequireDualStack`. - `SingleStack` is for a single IP family. 
- `PreferDualStack` is for two IP families on - dual-stack configured clusters or a single IP - family on single-stack clusters. - `RequireDualStack` fails unless there are two IP - families on dual-stack configured clusters. If - unspecified, Kubernetes will choose the default - value based on the service type. Available on - Kubernetes 1.20 and newer. - ipFamilies: - type: array - items: - type: string - enum: - - IPv4 - - IPv6 - description: >- - Specifies the IP Families used by the service. - Available options are `IPv4` and `IPv6. If - unspecified, Kubernetes will choose the default - value based on the `ipFamilyPolicy` setting. - Available on Kubernetes 1.20 and newer. - description: Template for Cruise Control API `Service`. - podDisruptionBudget: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. - Can be applied to different resources such - as `StatefulSets`, `Deployments`, `Pods`, - and `Services`. - description: >- - Metadata to apply to the - `PodDistruptionBugetTemplate` resource. - maxUnavailable: - type: integer - minimum: 0 - description: >- - Maximum number of unavailable pods to allow - automatic Pod eviction. A Pod eviction is - allowed when the `maxUnavailable` number of pods - or fewer are unavailable after the eviction. - Setting this value to 0 prevents all voluntary - evictions, so the pods must be evicted manually. - Defaults to 1. - description: Template for Cruise Control `PodDisruptionBudget`. 
- cruiseControlContainer: - type: object - properties: - env: - type: array - items: - type: object - properties: - name: - type: string - description: The environment variable key. - value: - type: string - description: The environment variable value. - description: >- - Environment variables which should be applied to - the container. - securityContext: - type: object - properties: - allowPrivilegeEscalation: - type: boolean - capabilities: - type: object - properties: - add: - type: array - items: - type: string - drop: - type: array - items: - type: string - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - type: integer - runAsNonRoot: - type: boolean - runAsUser: - type: integer - seLinuxOptions: - type: object - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - seccompProfile: - type: object - properties: - localhostProfile: - type: string - type: - type: string - windowsOptions: - type: object - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - description: Security context for the container. - description: Template for the Cruise Control container. - tlsSidecarContainer: - type: object - properties: - env: - type: array - items: - type: object - properties: - name: - type: string - description: The environment variable key. - value: - type: string - description: The environment variable value. - description: >- - Environment variables which should be applied to - the container. 
- securityContext: - type: object - properties: - allowPrivilegeEscalation: - type: boolean - capabilities: - type: object - properties: - add: - type: array - items: - type: string - drop: - type: array - items: - type: string - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - type: integer - runAsNonRoot: - type: boolean - runAsUser: - type: integer - seLinuxOptions: - type: object - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - seccompProfile: - type: object - properties: - localhostProfile: - type: string - type: - type: string - windowsOptions: - type: object - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - description: Security context for the container. - description: >- - Template for the Cruise Control TLS sidecar - container. - description: >- - Template to specify how Cruise Control resources, - `Deployments` and `Pods`, are generated. - brokerCapacity: - type: object - properties: - disk: - type: string - pattern: '^[0-9]+([.][0-9]*)?([KMGTPE]i?|e[0-9]+)?$' - description: >- - Broker capacity for disk in bytes, for example, - 100Gi. - cpuUtilization: - type: integer - minimum: 0 - maximum: 100 - description: >- - Broker capacity for CPU resource utilization as a - percentage (0 - 100). - inboundNetwork: - type: string - pattern: '[0-9]+([KMG]i?)?B/s' - description: >- - Broker capacity for inbound network throughput in - bytes per second, for example, 10000KB/s. - outboundNetwork: - type: string - pattern: '[0-9]+([KMG]i?)?B/s' - description: >- - Broker capacity for outbound network throughput in - bytes per second, for example 10000KB/s. - description: The Cruise Control `brokerCapacity` configuration. - config: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - The Cruise Control configuration. 
For a full list of - configuration options refer to - https://github.com/linkedin/cruise-control/wiki/Configurations. - Note that properties with the following prefixes cannot - be set: bootstrap.servers, client.id, zookeeper., - network., security., - failed.brokers.zk.path,webserver.http., - webserver.api.urlprefix, webserver.session.path, - webserver.accesslog., two.step., - request.reason.required,metric.reporter.sampler.bootstrap.servers, - metric.reporter.topic, - partition.metric.sample.store.topic, - broker.metric.sample.store.topic,capacity.config.file, - self.healing., anomaly.detection., ssl. (with the - exception of: ssl.cipher.suites, ssl.protocol, - ssl.enabled.protocols, - webserver.http.cors.enabled,webserver.http.cors.origin, - webserver.http.cors.exposeheaders). - metricsConfig: - type: object - properties: - type: - type: string - enum: - - jmxPrometheusExporter - description: >- - Metrics type. Only 'jmxPrometheusExporter' supported - currently. - valueFrom: - type: object - properties: - configMapKeyRef: - type: object - properties: - key: - type: string - name: - type: string - optional: - type: boolean - description: >- - Reference to the key in the ConfigMap containing - the configuration. - description: >- - ConfigMap entry where the Prometheus JMX Exporter - configuration is stored. For details of the - structure of this configuration, see the - {JMXExporter}. - required: - - type - - valueFrom - description: Metrics configuration. - description: >- - Configuration for Cruise Control deployment. Deploys a - Cruise Control instance when specified. - jmxTrans: - type: object - properties: - image: - type: string - description: The image to use for the JmxTrans. 
- outputDefinitions: - type: array - items: - type: object - properties: - outputType: - type: string - description: >- - Template for setting the format of the data that - will be pushed.For more information see - https://github.com/jmxtrans/jmxtrans/wiki/OutputWriters[JmxTrans - OutputWriters]. - host: - type: string - description: >- - The DNS/hostname of the remote host that the data - is pushed to. - port: - type: integer - description: >- - The port of the remote host that the data is - pushed to. - flushDelayInSeconds: - type: integer - description: >- - How many seconds the JmxTrans waits before pushing - a new set of data out. - typeNames: - type: array - items: - type: string - description: >- - Template for filtering data to be included in - response to a wildcard query. For more information - see - https://github.com/jmxtrans/jmxtrans/wiki/Queries[JmxTrans - queries]. - name: - type: string - description: >- - Template for setting the name of the output - definition. This is used to identify where to send - the results of queries should be sent. - required: - - outputType - - name - description: >- - Defines the output hosts that will be referenced later - on. For more information on these properties see, - xref:type-JmxTransOutputDefinitionTemplate-reference[`JmxTransOutputDefinitionTemplate` - schema reference]. - logLevel: - type: string - description: >- - Sets the logging level of the JmxTrans deployment.For - more information see, - https://github.com/jmxtrans/jmxtrans-agent/wiki/Troubleshooting[JmxTrans - Logging Level]. - kafkaQueries: - type: array - items: - type: object - properties: - targetMBean: - type: string - description: >- - If using wildcards instead of a specific MBean - then the data is gathered from multiple MBeans. - Otherwise if specifying an MBean then data is - gathered from that specified MBean. 
- attributes: - type: array - items: - type: string - description: >- - Determine which attributes of the targeted MBean - should be included. - outputs: - type: array - items: - type: string - description: >- - List of the names of output definitions specified - in the spec.kafka.jmxTrans.outputDefinitions that - have defined where JMX metrics are pushed to, and - in which data format. - required: - - targetMBean - - attributes - - outputs - description: >- - Queries to send to the Kafka brokers to define what data - should be read from each broker. For more information on - these properties see, - xref:type-JmxTransQueryTemplate-reference[`JmxTransQueryTemplate` - schema reference]. - resources: - type: object - properties: - limits: - x-kubernetes-preserve-unknown-fields: true - type: object - requests: - x-kubernetes-preserve-unknown-fields: true - type: object - description: CPU and memory resources to reserve. - template: - type: object - properties: - deployment: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. - Can be applied to different resources such - as `StatefulSets`, `Deployments`, `Pods`, - and `Services`. - description: Metadata applied to the resource. - description: Template for JmxTrans `Deployment`. - pod: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. 
- annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. - Can be applied to different resources such - as `StatefulSets`, `Deployments`, `Pods`, - and `Services`. - description: Metadata applied to the resource. - imagePullSecrets: - type: array - items: - type: object - properties: - name: - type: string - description: >- - List of references to secrets in the same - namespace to use for pulling any of the images - used by this Pod. When the - `STRIMZI_IMAGE_PULL_SECRETS` environment - variable in Cluster Operator and the - `imagePullSecrets` option are specified, only - the `imagePullSecrets` variable is used and the - `STRIMZI_IMAGE_PULL_SECRETS` variable is - ignored. - securityContext: - type: object - properties: - fsGroup: - type: integer - fsGroupChangePolicy: - type: string - runAsGroup: - type: integer - runAsNonRoot: - type: boolean - runAsUser: - type: integer - seLinuxOptions: - type: object - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - seccompProfile: - type: object - properties: - localhostProfile: - type: string - type: - type: string - supplementalGroups: - type: array - items: - type: integer - sysctls: - type: array - items: - type: object - properties: - name: - type: string - value: - type: string - windowsOptions: - type: object - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - description: >- - Configures pod-level security attributes and - common container settings. - terminationGracePeriodSeconds: - type: integer - minimum: 0 - description: >- - The grace period is the duration in seconds - after the processes running in the pod are sent - a termination signal, and the time when the - processes are forcibly halted with a kill - signal. Set this value to longer than the - expected cleanup time for your process. 
Value - must be a non-negative integer. A zero value - indicates delete immediately. You might need to - increase the grace period for very large Kafka - clusters, so that the Kafka brokers have enough - time to transfer their work to another broker - before they are terminated. Defaults to 30 - seconds. - affinity: - type: object - properties: - nodeAffinity: - type: object - properties: - preferredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - preference: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchFields: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - weight: - type: integer - requiredDuringSchedulingIgnoredDuringExecution: - type: object - properties: - nodeSelectorTerms: - type: array - items: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchFields: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - podAffinity: - type: object - properties: - preferredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - podAffinityTerm: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - weight: - type: integer - 
requiredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - podAntiAffinity: - type: object - properties: - preferredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - podAffinityTerm: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - weight: - type: integer - requiredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - description: The pod's affinity rules. - tolerations: - type: array - items: - type: object - properties: - effect: - type: string - key: - type: string - operator: - type: string - tolerationSeconds: - type: integer - value: - type: string - description: The pod's tolerations. - priorityClassName: - type: string - description: >- - The name of the priority class used to assign - priority to the pods. 
For more information about - priority classes, see {K8sPriorityClass}. - schedulerName: - type: string - description: >- - The name of the scheduler used to dispatch this - `Pod`. If not specified, the default scheduler - will be used. - hostAliases: - type: array - items: - type: object - properties: - hostnames: - type: array - items: - type: string - ip: - type: string - description: >- - The pod's HostAliases. HostAliases is an - optional list of hosts and IPs that will be - injected into the Pod's hosts file if specified. - enableServiceLinks: - type: boolean - description: >- - Indicates whether information about services - should be injected into Pod's environment - variables. - topologySpreadConstraints: - type: array - items: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - maxSkew: - type: integer - topologyKey: - type: string - whenUnsatisfiable: - type: string - description: The pod's topology spread constraints. - description: Template for JmxTrans `Pods`. - container: - type: object - properties: - env: - type: array - items: - type: object - properties: - name: - type: string - description: The environment variable key. - value: - type: string - description: The environment variable value. - description: >- - Environment variables which should be applied to - the container. 
- securityContext: - type: object - properties: - allowPrivilegeEscalation: - type: boolean - capabilities: - type: object - properties: - add: - type: array - items: - type: string - drop: - type: array - items: - type: string - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - type: integer - runAsNonRoot: - type: boolean - runAsUser: - type: integer - seLinuxOptions: - type: object - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - seccompProfile: - type: object - properties: - localhostProfile: - type: string - type: - type: string - windowsOptions: - type: object - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - description: Security context for the container. - description: Template for JmxTrans container. - description: Template for JmxTrans resources. - required: - - outputDefinitions - - kafkaQueries - description: >- - Configuration for JmxTrans. When the property is present a - JmxTrans deployment is created for gathering JMX metrics - from each Kafka broker. For more information see - https://github.com/jmxtrans/jmxtrans[JmxTrans GitHub]. - kafkaExporter: - type: object - properties: - image: - type: string - description: The docker image for the pods. - groupRegex: - type: string - description: >- - Regular expression to specify which consumer groups to - collect. Default value is `.*`. - topicRegex: - type: string - description: >- - Regular expression to specify which topics to collect. - Default value is `.*`. - resources: - type: object - properties: - limits: - x-kubernetes-preserve-unknown-fields: true - type: object - requests: - x-kubernetes-preserve-unknown-fields: true - type: object - description: CPU and memory resources to reserve. - logging: - type: string - description: >- - Only log messages with the given severity or above. 
- Valid levels: [`debug`, `info`, `warn`, `error`, - `fatal`]. Default log level is `info`. - enableSaramaLogging: - type: boolean - description: >- - Enable Sarama logging, a Go client library used by the - Kafka Exporter. - template: - type: object - properties: - deployment: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. - Can be applied to different resources such - as `StatefulSets`, `Deployments`, `Pods`, - and `Services`. - description: Metadata applied to the resource. - description: Template for Kafka Exporter `Deployment`. - pod: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. - Can be applied to different resources such - as `StatefulSets`, `Deployments`, `Pods`, - and `Services`. - description: Metadata applied to the resource. - imagePullSecrets: - type: array - items: - type: object - properties: - name: - type: string - description: >- - List of references to secrets in the same - namespace to use for pulling any of the images - used by this Pod. 
When the - `STRIMZI_IMAGE_PULL_SECRETS` environment - variable in Cluster Operator and the - `imagePullSecrets` option are specified, only - the `imagePullSecrets` variable is used and the - `STRIMZI_IMAGE_PULL_SECRETS` variable is - ignored. - securityContext: - type: object - properties: - fsGroup: - type: integer - fsGroupChangePolicy: - type: string - runAsGroup: - type: integer - runAsNonRoot: - type: boolean - runAsUser: - type: integer - seLinuxOptions: - type: object - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - seccompProfile: - type: object - properties: - localhostProfile: - type: string - type: - type: string - supplementalGroups: - type: array - items: - type: integer - sysctls: - type: array - items: - type: object - properties: - name: - type: string - value: - type: string - windowsOptions: - type: object - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - description: >- - Configures pod-level security attributes and - common container settings. - terminationGracePeriodSeconds: - type: integer - minimum: 0 - description: >- - The grace period is the duration in seconds - after the processes running in the pod are sent - a termination signal, and the time when the - processes are forcibly halted with a kill - signal. Set this value to longer than the - expected cleanup time for your process. Value - must be a non-negative integer. A zero value - indicates delete immediately. You might need to - increase the grace period for very large Kafka - clusters, so that the Kafka brokers have enough - time to transfer their work to another broker - before they are terminated. Defaults to 30 - seconds. 
- affinity: - type: object - properties: - nodeAffinity: - type: object - properties: - preferredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - preference: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchFields: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - weight: - type: integer - requiredDuringSchedulingIgnoredDuringExecution: - type: object - properties: - nodeSelectorTerms: - type: array - items: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchFields: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - podAffinity: - type: object - properties: - preferredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - podAffinityTerm: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - weight: - type: integer - requiredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - 
matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - podAntiAffinity: - type: object - properties: - preferredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - podAffinityTerm: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - weight: - type: integer - requiredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - description: The pod's affinity rules. - tolerations: - type: array - items: - type: object - properties: - effect: - type: string - key: - type: string - operator: - type: string - tolerationSeconds: - type: integer - value: - type: string - description: The pod's tolerations. - priorityClassName: - type: string - description: >- - The name of the priority class used to assign - priority to the pods. For more information about - priority classes, see {K8sPriorityClass}. - schedulerName: - type: string - description: >- - The name of the scheduler used to dispatch this - `Pod`. If not specified, the default scheduler - will be used. 
- hostAliases: - type: array - items: - type: object - properties: - hostnames: - type: array - items: - type: string - ip: - type: string - description: >- - The pod's HostAliases. HostAliases is an - optional list of hosts and IPs that will be - injected into the Pod's hosts file if specified. - enableServiceLinks: - type: boolean - description: >- - Indicates whether information about services - should be injected into Pod's environment - variables. - topologySpreadConstraints: - type: array - items: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - maxSkew: - type: integer - topologyKey: - type: string - whenUnsatisfiable: - type: string - description: The pod's topology spread constraints. - description: Template for Kafka Exporter `Pods`. - service: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. - Can be applied to different resources such - as `StatefulSets`, `Deployments`, `Pods`, - and `Services`. - description: Metadata applied to the resource. - description: Template for Kafka Exporter `Service`. - container: - type: object - properties: - env: - type: array - items: - type: object - properties: - name: - type: string - description: The environment variable key. - value: - type: string - description: The environment variable value. 
- description: >- - Environment variables which should be applied to - the container. - securityContext: - type: object - properties: - allowPrivilegeEscalation: - type: boolean - capabilities: - type: object - properties: - add: - type: array - items: - type: string - drop: - type: array - items: - type: string - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - type: integer - runAsNonRoot: - type: boolean - runAsUser: - type: integer - seLinuxOptions: - type: object - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - seccompProfile: - type: object - properties: - localhostProfile: - type: string - type: - type: string - windowsOptions: - type: object - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - description: Security context for the container. - description: Template for the Kafka Exporter container. - description: Customization of deployment templates and pods. - livenessProbe: - type: object - properties: - failureThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive failures for the probe to be - considered failed after having succeeded. Defaults - to 3. Minimum value is 1. - initialDelaySeconds: - type: integer - minimum: 0 - description: >- - The initial delay before first the health is first - checked. Default to 15 seconds. Minimum value is 0. - periodSeconds: - type: integer - minimum: 1 - description: >- - How often (in seconds) to perform the probe. Default - to 10 seconds. Minimum value is 1. - successThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive successes for the probe to be - considered successful after having failed. Defaults - to 1. Must be 1 for liveness. Minimum value is 1. - timeoutSeconds: - type: integer - minimum: 1 - description: >- - The timeout for each attempted health check. 
Default - to 5 seconds. Minimum value is 1. - description: Pod liveness check. - readinessProbe: - type: object - properties: - failureThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive failures for the probe to be - considered failed after having succeeded. Defaults - to 3. Minimum value is 1. - initialDelaySeconds: - type: integer - minimum: 0 - description: >- - The initial delay before first the health is first - checked. Default to 15 seconds. Minimum value is 0. - periodSeconds: - type: integer - minimum: 1 - description: >- - How often (in seconds) to perform the probe. Default - to 10 seconds. Minimum value is 1. - successThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive successes for the probe to be - considered successful after having failed. Defaults - to 1. Must be 1 for liveness. Minimum value is 1. - timeoutSeconds: - type: integer - minimum: 1 - description: >- - The timeout for each attempted health check. Default - to 5 seconds. Minimum value is 1. - description: Pod readiness check. - description: >- - Configuration of the Kafka Exporter. Kafka Exporter can - provide additional metrics, for example lag of consumer - group at topic/partition. - maintenanceTimeWindows: - type: array - items: - type: string - description: >- - A list of time windows for maintenance tasks (that is, - certificates renewal). Each time window is defined by a cron - expression. - required: - - kafka - - zookeeper - description: >- - The specification of the Kafka and ZooKeeper clusters, and Topic - Operator. - status: - type: object - properties: - conditions: - type: array - items: - type: object - properties: - type: - type: string - description: >- - The unique identifier of a condition, used to - distinguish between other conditions in the resource. - status: - type: string - description: >- - The status of the condition, either True, False or - Unknown. 
- lastTransitionTime: - type: string - description: >- - Last time the condition of a type changed from one - status to another. The required format is - 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. - reason: - type: string - description: >- - The reason for the condition's last transition (a - single word in CamelCase). - message: - type: string - description: >- - Human-readable message indicating details about the - condition's last transition. - description: List of status conditions. - observedGeneration: - type: integer - description: >- - The generation of the CRD that was last reconciled by the - operator. - listeners: - type: array - items: - type: object - properties: - type: - type: string - description: >- - The type of the listener. Can be one of the following - three types: `plain`, `tls`, and `external`. - addresses: - type: array - items: - type: object - properties: - host: - type: string - description: >- - The DNS name or IP address of the Kafka - bootstrap service. - port: - type: integer - description: The port of the Kafka bootstrap service. - description: A list of the addresses for this listener. - bootstrapServers: - type: string - description: >- - A comma-separated list of `host:port` pairs for - connecting to the Kafka cluster using this listener. - certificates: - type: array - items: - type: string - description: >- - A list of TLS certificates which can be used to verify - the identity of the server when connecting to the - given listener. Set only for `tls` and `external` - listeners. - description: Addresses of the internal and external listeners. - clusterId: - type: string - description: Kafka cluster Id. - description: >- - The status of the Kafka and ZooKeeper clusters, and Topic - Operator. 
- ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: strimzi-cluster-operator-entity-operator-delegation - labels: - app: strimzi - namespace: private -subjects: - - kind: ServiceAccount - name: strimzi-cluster-operator - namespace: private -roleRef: - kind: ClusterRole - name: strimzi-entity-operator - apiGroup: rbac.authorization.k8s.io - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: strimzi-cluster-operator - labels: - app: strimzi -subjects: - - kind: ServiceAccount - name: strimzi-cluster-operator - namespace: private -roleRef: - kind: ClusterRole - name: strimzi-cluster-operator-global - apiGroup: rbac.authorization.k8s.io - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: strimzi-cluster-operator-topic-operator-delegation - labels: - app: strimzi - namespace: private -subjects: - - kind: ServiceAccount - name: strimzi-cluster-operator - namespace: private -roleRef: - kind: ClusterRole - name: strimzi-topic-operator - apiGroup: rbac.authorization.k8s.io - ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: kafkausers.kafka.strimzi.io - labels: - app: strimzi - strimzi.io/crd-install: 'true' -spec: - group: kafka.strimzi.io - names: - kind: KafkaUser - listKind: KafkaUserList - singular: kafkauser - plural: kafkausers - shortNames: - - ku - categories: - - strimzi - scope: Namespaced - conversion: - strategy: None - versions: - - name: v1beta2 - served: true - storage: true - subresources: - status: {} - additionalPrinterColumns: - - name: Cluster - description: The name of the Kafka cluster this user belongs to - jsonPath: .metadata.labels.strimzi\.io/cluster - type: string - - name: Authentication - description: How the user is authenticated - jsonPath: .spec.authentication.type - type: string - - name: Authorization - description: How the user is authorised - jsonPath: .spec.authorization.type - type: 
string - - name: Ready - description: The state of the custom resource - jsonPath: '.status.conditions[?(@.type=="Ready")].status' - type: string - schema: - openAPIV3Schema: - type: object - properties: - spec: - type: object - properties: - authentication: - type: object - properties: - type: - type: string - enum: - - tls - - scram-sha-512 - description: Authentication type. - required: - - type - description: Authentication mechanism enabled for this Kafka user. - authorization: - type: object - properties: - acls: - type: array - items: - type: object - properties: - host: - type: string - description: >- - The host from which the action described in the - ACL rule is allowed or denied. - operation: - type: string - enum: - - Read - - Write - - Create - - Delete - - Alter - - Describe - - ClusterAction - - AlterConfigs - - DescribeConfigs - - IdempotentWrite - - All - description: >- - Operation which will be allowed or denied. - Supported operations are: Read, Write, Create, - Delete, Alter, Describe, ClusterAction, - AlterConfigs, DescribeConfigs, IdempotentWrite and - All. - resource: - type: object - properties: - name: - type: string - description: >- - Name of resource for which given ACL rule - applies. Can be combined with `patternType` - field to use prefix pattern. - patternType: - type: string - enum: - - literal - - prefix - description: >- - Describes the pattern used in the resource - field. The supported types are `literal` and - `prefix`. With `literal` pattern type, the - resource field will be used as a definition of - a full name. With `prefix` pattern type, the - resource name will be used only as a prefix. - Default value is `literal`. - type: - type: string - enum: - - topic - - group - - cluster - - transactionalId - description: >- - Resource type. The available resource types - are `topic`, `group`, `cluster`, and - `transactionalId`. - required: - - type - description: >- - Indicates the resource for which given ACL rule - applies. 
- type: - type: string - enum: - - allow - - deny - description: >- - The type of the rule. Currently the only supported - type is `allow`. ACL rules with type `allow` are - used to allow user to execute the specified - operations. Default value is `allow`. - required: - - operation - - resource - description: List of ACL rules which should be applied to this user. - type: - type: string - enum: - - simple - description: >- - Authorization type. Currently the only supported type is - `simple`. `simple` authorization type uses Kafka's - `kafka.security.authorizer.AclAuthorizer` class for - authorization. - required: - - acls - - type - description: Authorization rules for this Kafka user. - quotas: - type: object - properties: - consumerByteRate: - type: integer - minimum: 0 - description: >- - A quota on the maximum bytes per-second that each client - group can fetch from a broker before the clients in the - group are throttled. Defined on a per-broker basis. - producerByteRate: - type: integer - minimum: 0 - description: >- - A quota on the maximum bytes per-second that each client - group can publish to a broker before the clients in the - group are throttled. Defined on a per-broker basis. - requestPercentage: - type: integer - minimum: 0 - description: >- - A quota on the maximum CPU utilization of each client - group as a percentage of network and I/O threads. - description: >- - Quotas on requests to control the broker resources used by - clients. Network bandwidth and request rate quotas can be - enforced.Kafka documentation for Kafka User quotas can be - found at - http://kafka.apache.org/documentation/#design_quotas. - template: - type: object - properties: - secret: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. 
Can be - applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - description: Metadata applied to the resource. - description: >- - Template for KafkaUser resources. The template allows - users to specify how the `Secret` with password or TLS - certificates is generated. - description: Template to specify how Kafka User `Secrets` are generated. - description: The specification of the user. - status: - type: object - properties: - conditions: - type: array - items: - type: object - properties: - type: - type: string - description: >- - The unique identifier of a condition, used to - distinguish between other conditions in the resource. - status: - type: string - description: >- - The status of the condition, either True, False or - Unknown. - lastTransitionTime: - type: string - description: >- - Last time the condition of a type changed from one - status to another. The required format is - 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. - reason: - type: string - description: >- - The reason for the condition's last transition (a - single word in CamelCase). - message: - type: string - description: >- - Human-readable message indicating details about the - condition's last transition. - description: List of status conditions. - observedGeneration: - type: integer - description: >- - The generation of the CRD that was last reconciled by the - operator. - username: - type: string - description: Username. - secret: - type: string - description: The name of `Secret` where the credentials are stored. - description: The status of the Kafka User. 
- - name: v1beta1 - served: true - storage: false - subresources: - status: {} - additionalPrinterColumns: - - name: Cluster - description: The name of the Kafka cluster this user belongs to - jsonPath: .metadata.labels.strimzi\.io/cluster - type: string - - name: Authentication - description: How the user is authenticated - jsonPath: .spec.authentication.type - type: string - - name: Authorization - description: How the user is authorised - jsonPath: .spec.authorization.type - type: string - - name: Ready - description: The state of the custom resource - jsonPath: '.status.conditions[?(@.type=="Ready")].status' - type: string - schema: - openAPIV3Schema: - type: object - properties: - spec: - type: object - properties: - authentication: - type: object - properties: - type: - type: string - enum: - - tls - - scram-sha-512 - description: Authentication type. - required: - - type - description: Authentication mechanism enabled for this Kafka user. - authorization: - type: object - properties: - acls: - type: array - items: - type: object - properties: - host: - type: string - description: >- - The host from which the action described in the - ACL rule is allowed or denied. - operation: - type: string - enum: - - Read - - Write - - Create - - Delete - - Alter - - Describe - - ClusterAction - - AlterConfigs - - DescribeConfigs - - IdempotentWrite - - All - description: >- - Operation which will be allowed or denied. - Supported operations are: Read, Write, Create, - Delete, Alter, Describe, ClusterAction, - AlterConfigs, DescribeConfigs, IdempotentWrite and - All. - resource: - type: object - properties: - name: - type: string - description: >- - Name of resource for which given ACL rule - applies. Can be combined with `patternType` - field to use prefix pattern. - patternType: - type: string - enum: - - literal - - prefix - description: >- - Describes the pattern used in the resource - field. The supported types are `literal` and - `prefix`. 
With `literal` pattern type, the - resource field will be used as a definition of - a full name. With `prefix` pattern type, the - resource name will be used only as a prefix. - Default value is `literal`. - type: - type: string - enum: - - topic - - group - - cluster - - transactionalId - description: >- - Resource type. The available resource types - are `topic`, `group`, `cluster`, and - `transactionalId`. - required: - - type - description: >- - Indicates the resource for which given ACL rule - applies. - type: - type: string - enum: - - allow - - deny - description: >- - The type of the rule. Currently the only supported - type is `allow`. ACL rules with type `allow` are - used to allow user to execute the specified - operations. Default value is `allow`. - required: - - operation - - resource - description: List of ACL rules which should be applied to this user. - type: - type: string - enum: - - simple - description: >- - Authorization type. Currently the only supported type is - `simple`. `simple` authorization type uses Kafka's - `kafka.security.authorizer.AclAuthorizer` class for - authorization. - required: - - acls - - type - description: Authorization rules for this Kafka user. - quotas: - type: object - properties: - consumerByteRate: - type: integer - minimum: 0 - description: >- - A quota on the maximum bytes per-second that each client - group can fetch from a broker before the clients in the - group are throttled. Defined on a per-broker basis. - producerByteRate: - type: integer - minimum: 0 - description: >- - A quota on the maximum bytes per-second that each client - group can publish to a broker before the clients in the - group are throttled. Defined on a per-broker basis. - requestPercentage: - type: integer - minimum: 0 - description: >- - A quota on the maximum CPU utilization of each client - group as a percentage of network and I/O threads. - description: >- - Quotas on requests to control the broker resources used by - clients. 
Network bandwidth and request rate quotas can be - enforced.Kafka documentation for Kafka User quotas can be - found at - http://kafka.apache.org/documentation/#design_quotas. - template: - type: object - properties: - secret: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can be - applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - description: Metadata applied to the resource. - description: >- - Template for KafkaUser resources. The template allows - users to specify how the `Secret` with password or TLS - certificates is generated. - description: Template to specify how Kafka User `Secrets` are generated. - description: The specification of the user. - status: - type: object - properties: - conditions: - type: array - items: - type: object - properties: - type: - type: string - description: >- - The unique identifier of a condition, used to - distinguish between other conditions in the resource. - status: - type: string - description: >- - The status of the condition, either True, False or - Unknown. - lastTransitionTime: - type: string - description: >- - Last time the condition of a type changed from one - status to another. The required format is - 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. - reason: - type: string - description: >- - The reason for the condition's last transition (a - single word in CamelCase). - message: - type: string - description: >- - Human-readable message indicating details about the - condition's last transition. - description: List of status conditions. 
- observedGeneration: - type: integer - description: >- - The generation of the CRD that was last reconciled by the - operator. - username: - type: string - description: Username. - secret: - type: string - description: The name of `Secret` where the credentials are stored. - description: The status of the Kafka User. - - name: v1alpha1 - served: true - storage: false - subresources: - status: {} - additionalPrinterColumns: - - name: Cluster - description: The name of the Kafka cluster this user belongs to - jsonPath: .metadata.labels.strimzi\.io/cluster - type: string - - name: Authentication - description: How the user is authenticated - jsonPath: .spec.authentication.type - type: string - - name: Authorization - description: How the user is authorised - jsonPath: .spec.authorization.type - type: string - - name: Ready - description: The state of the custom resource - jsonPath: '.status.conditions[?(@.type=="Ready")].status' - type: string - schema: - openAPIV3Schema: - type: object - properties: - spec: - type: object - properties: - authentication: - type: object - properties: - type: - type: string - enum: - - tls - - scram-sha-512 - description: Authentication type. - required: - - type - description: Authentication mechanism enabled for this Kafka user. - authorization: - type: object - properties: - acls: - type: array - items: - type: object - properties: - host: - type: string - description: >- - The host from which the action described in the - ACL rule is allowed or denied. - operation: - type: string - enum: - - Read - - Write - - Create - - Delete - - Alter - - Describe - - ClusterAction - - AlterConfigs - - DescribeConfigs - - IdempotentWrite - - All - description: >- - Operation which will be allowed or denied. - Supported operations are: Read, Write, Create, - Delete, Alter, Describe, ClusterAction, - AlterConfigs, DescribeConfigs, IdempotentWrite and - All. 
- resource: - type: object - properties: - name: - type: string - description: >- - Name of resource for which given ACL rule - applies. Can be combined with `patternType` - field to use prefix pattern. - patternType: - type: string - enum: - - literal - - prefix - description: >- - Describes the pattern used in the resource - field. The supported types are `literal` and - `prefix`. With `literal` pattern type, the - resource field will be used as a definition of - a full name. With `prefix` pattern type, the - resource name will be used only as a prefix. - Default value is `literal`. - type: - type: string - enum: - - topic - - group - - cluster - - transactionalId - description: >- - Resource type. The available resource types - are `topic`, `group`, `cluster`, and - `transactionalId`. - required: - - type - description: >- - Indicates the resource for which given ACL rule - applies. - type: - type: string - enum: - - allow - - deny - description: >- - The type of the rule. Currently the only supported - type is `allow`. ACL rules with type `allow` are - used to allow user to execute the specified - operations. Default value is `allow`. - required: - - operation - - resource - description: List of ACL rules which should be applied to this user. - type: - type: string - enum: - - simple - description: >- - Authorization type. Currently the only supported type is - `simple`. `simple` authorization type uses Kafka's - `kafka.security.authorizer.AclAuthorizer` class for - authorization. - required: - - acls - - type - description: Authorization rules for this Kafka user. - quotas: - type: object - properties: - consumerByteRate: - type: integer - minimum: 0 - description: >- - A quota on the maximum bytes per-second that each client - group can fetch from a broker before the clients in the - group are throttled. Defined on a per-broker basis. 
- producerByteRate: - type: integer - minimum: 0 - description: >- - A quota on the maximum bytes per-second that each client - group can publish to a broker before the clients in the - group are throttled. Defined on a per-broker basis. - requestPercentage: - type: integer - minimum: 0 - description: >- - A quota on the maximum CPU utilization of each client - group as a percentage of network and I/O threads. - description: >- - Quotas on requests to control the broker resources used by - clients. Network bandwidth and request rate quotas can be - enforced.Kafka documentation for Kafka User quotas can be - found at - http://kafka.apache.org/documentation/#design_quotas. - template: - type: object - properties: - secret: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can be - applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - description: Metadata applied to the resource. - description: >- - Template for KafkaUser resources. The template allows - users to specify how the `Secret` with password or TLS - certificates is generated. - description: Template to specify how Kafka User `Secrets` are generated. - description: The specification of the user. - status: - type: object - properties: - conditions: - type: array - items: - type: object - properties: - type: - type: string - description: >- - The unique identifier of a condition, used to - distinguish between other conditions in the resource. - status: - type: string - description: >- - The status of the condition, either True, False or - Unknown. 
- lastTransitionTime: - type: string - description: >- - Last time the condition of a type changed from one - status to another. The required format is - 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. - reason: - type: string - description: >- - The reason for the condition's last transition (a - single word in CamelCase). - message: - type: string - description: >- - Human-readable message indicating details about the - condition's last transition. - description: List of status conditions. - observedGeneration: - type: integer - description: >- - The generation of the CRD that was last reconciled by the - operator. - username: - type: string - description: Username. - secret: - type: string - description: The name of `Secret` where the credentials are stored. - description: The status of the Kafka User. - ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: kafkarebalances.kafka.strimzi.io - labels: - app: strimzi - strimzi.io/crd-install: 'true' -spec: - group: kafka.strimzi.io - names: - kind: KafkaRebalance - listKind: KafkaRebalanceList - singular: kafkarebalance - plural: kafkarebalances - shortNames: - - kr - categories: - - strimzi - scope: Namespaced - conversion: - strategy: None - versions: - - name: v1beta2 - served: true - storage: true - subresources: - status: {} - additionalPrinterColumns: - - name: Cluster - description: The name of the Kafka cluster this resource rebalances - jsonPath: .metadata.labels.strimzi\.io/cluster - type: string - schema: - openAPIV3Schema: - type: object - properties: - spec: - type: object - properties: - goals: - type: array - items: - type: string - description: >- - A list of goals, ordered by decreasing priority, to use for - generating and executing the rebalance proposal. The - supported goals are available at - https://github.com/linkedin/cruise-control#goals. 
If an - empty goals list is provided, the goals declared in the - default.goals Cruise Control configuration parameter are - used. - skipHardGoalCheck: - type: boolean - description: >- - Whether to allow the hard goals specified in the Kafka CR to - be skipped in optimization proposal generation. This can be - useful when some of those hard goals are preventing a - balance solution being found. Default is false. - excludedTopics: - type: string - description: >- - A regular expression where any matching topics will be - excluded from the calculation of optimization proposals. - This expression will be parsed by the - java.util.regex.Pattern class; for more information on the - supported formar consult the documentation for that class. - concurrentPartitionMovementsPerBroker: - type: integer - minimum: 0 - description: >- - The upper bound of ongoing partition replica movements going - into/out of each broker. Default is 5. - concurrentIntraBrokerPartitionMovements: - type: integer - minimum: 0 - description: >- - The upper bound of ongoing partition replica movements - between disks within each broker. Default is 2. - concurrentLeaderMovements: - type: integer - minimum: 0 - description: >- - The upper bound of ongoing partition leadership movements. - Default is 1000. - replicationThrottle: - type: integer - minimum: 0 - description: >- - The upper bound, in bytes per second, on the bandwidth used - to move replicas. There is no limit by default. - replicaMovementStrategies: - type: array - items: - type: string - description: >- - A list of strategy class names used to determine the - execution order for the replica movements in the generated - optimization proposal. By default - BaseReplicaMovementStrategy is used, which will execute the - replica movements in the order that they were generated. - description: The specification of the Kafka rebalance. 
- status: - type: object - properties: - conditions: - type: array - items: - type: object - properties: - type: - type: string - description: >- - The unique identifier of a condition, used to - distinguish between other conditions in the resource. - status: - type: string - description: >- - The status of the condition, either True, False or - Unknown. - lastTransitionTime: - type: string - description: >- - Last time the condition of a type changed from one - status to another. The required format is - 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. - reason: - type: string - description: >- - The reason for the condition's last transition (a - single word in CamelCase). - message: - type: string - description: >- - Human-readable message indicating details about the - condition's last transition. - description: List of status conditions. - observedGeneration: - type: integer - description: >- - The generation of the CRD that was last reconciled by the - operator. - sessionId: - type: string - description: >- - The session identifier for requests to Cruise Control - pertaining to this KafkaRebalance resource. This is used by - the Kafka Rebalance operator to track the status of ongoing - rebalancing operations. - optimizationResult: - x-kubernetes-preserve-unknown-fields: true - type: object - description: A JSON object describing the optimization result. - description: The status of the Kafka rebalance. 
- ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: strimzi-cluster-operator - labels: - app: strimzi - namespace: private -spec: - replicas: 1 - selector: - matchLabels: - name: strimzi-cluster-operator - strimzi.io/kind: cluster-operator - template: - metadata: - labels: - name: strimzi-cluster-operator - strimzi.io/kind: cluster-operator - spec: - serviceAccountName: strimzi-cluster-operator - volumes: - - name: strimzi-tmp - emptyDir: - medium: Memory - - name: co-config-volume - configMap: - name: strimzi-cluster-operator - containers: - - name: strimzi-cluster-operator - image: 'quay.io/strimzi/operator:0.23.0' - ports: - - containerPort: 8080 - name: http - args: - - /opt/strimzi/bin/cluster_operator_run.sh - volumeMounts: - - name: strimzi-tmp - mountPath: /tmp - - name: co-config-volume - mountPath: /opt/strimzi/custom-config/ - env: - - name: STRIMZI_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: STRIMZI_FULL_RECONCILIATION_INTERVAL_MS - value: '120000' - - name: STRIMZI_OPERATION_TIMEOUT_MS - value: '300000' - - name: STRIMZI_DEFAULT_TLS_SIDECAR_ENTITY_OPERATOR_IMAGE - value: 'quay.io/strimzi/kafka:0.23.0-kafka-2.8.0' - - name: STRIMZI_DEFAULT_KAFKA_EXPORTER_IMAGE - value: 'quay.io/strimzi/kafka:0.23.0-kafka-2.8.0' - - name: STRIMZI_DEFAULT_CRUISE_CONTROL_IMAGE - value: 'quay.io/strimzi/kafka:0.23.0-kafka-2.8.0' - - name: STRIMZI_DEFAULT_TLS_SIDECAR_CRUISE_CONTROL_IMAGE - value: 'quay.io/strimzi/kafka:0.23.0-kafka-2.8.0' - - name: STRIMZI_KAFKA_IMAGES - value: | - 2.6.0=quay.io/strimzi/kafka:0.23.0-kafka-2.6.0 - 2.6.1=quay.io/strimzi/kafka:0.23.0-kafka-2.6.1 - 2.6.2=quay.io/strimzi/kafka:0.23.0-kafka-2.6.2 - 2.7.0=quay.io/strimzi/kafka:0.23.0-kafka-2.7.0 - 2.8.0=quay.io/strimzi/kafka:0.23.0-kafka-2.8.0 - - name: STRIMZI_KAFKA_CONNECT_IMAGES - value: | - 2.6.0=quay.io/strimzi/kafka:0.23.0-kafka-2.6.0 - 2.6.1=quay.io/strimzi/kafka:0.23.0-kafka-2.6.1 - 2.6.2=quay.io/strimzi/kafka:0.23.0-kafka-2.6.2 - 
2.7.0=quay.io/strimzi/kafka:0.23.0-kafka-2.7.0 - 2.8.0=quay.io/strimzi/kafka:0.23.0-kafka-2.8.0 - - name: STRIMZI_KAFKA_CONNECT_S2I_IMAGES - value: | - 2.6.0=quay.io/strimzi/kafka:0.23.0-kafka-2.6.0 - 2.6.1=quay.io/strimzi/kafka:0.23.0-kafka-2.6.1 - 2.6.2=quay.io/strimzi/kafka:0.23.0-kafka-2.6.2 - 2.7.0=quay.io/strimzi/kafka:0.23.0-kafka-2.7.0 - 2.8.0=quay.io/strimzi/kafka:0.23.0-kafka-2.8.0 - - name: STRIMZI_KAFKA_MIRROR_MAKER_IMAGES - value: | - 2.6.0=quay.io/strimzi/kafka:0.23.0-kafka-2.6.0 - 2.6.1=quay.io/strimzi/kafka:0.23.0-kafka-2.6.1 - 2.6.2=quay.io/strimzi/kafka:0.23.0-kafka-2.6.2 - 2.7.0=quay.io/strimzi/kafka:0.23.0-kafka-2.7.0 - 2.8.0=quay.io/strimzi/kafka:0.23.0-kafka-2.8.0 - - name: STRIMZI_KAFKA_MIRROR_MAKER_2_IMAGES - value: | - 2.6.0=quay.io/strimzi/kafka:0.23.0-kafka-2.6.0 - 2.6.1=quay.io/strimzi/kafka:0.23.0-kafka-2.6.1 - 2.6.2=quay.io/strimzi/kafka:0.23.0-kafka-2.6.2 - 2.7.0=quay.io/strimzi/kafka:0.23.0-kafka-2.7.0 - 2.8.0=quay.io/strimzi/kafka:0.23.0-kafka-2.8.0 - - name: STRIMZI_DEFAULT_TOPIC_OPERATOR_IMAGE - value: 'quay.io/strimzi/operator:0.23.0' - - name: STRIMZI_DEFAULT_USER_OPERATOR_IMAGE - value: 'quay.io/strimzi/operator:0.23.0' - - name: STRIMZI_DEFAULT_KAFKA_INIT_IMAGE - value: 'quay.io/strimzi/operator:0.23.0' - - name: STRIMZI_DEFAULT_KAFKA_BRIDGE_IMAGE - value: 'quay.io/strimzi/kafka-bridge:0.19.0' - - name: STRIMZI_DEFAULT_JMXTRANS_IMAGE - value: 'quay.io/strimzi/jmxtrans:0.23.0' - - name: STRIMZI_DEFAULT_KANIKO_EXECUTOR_IMAGE - value: 'quay.io/strimzi/kaniko-executor:0.23.0' - - name: STRIMZI_OPERATOR_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: STRIMZI_FEATURE_GATES - value: '' - livenessProbe: - httpGet: - path: /healthy - port: http - initialDelaySeconds: 10 - periodSeconds: 30 - readinessProbe: - httpGet: - path: /ready - port: http - initialDelaySeconds: 10 - periodSeconds: 30 - resources: - limits: - cpu: 1000m - memory: 384Mi - requests: - cpu: 200m - memory: 384Mi - strategy: - type: 
Recreate - ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: kafkamirrormaker2s.kafka.strimzi.io - labels: - app: strimzi - strimzi.io/crd-install: 'true' -spec: - group: kafka.strimzi.io - names: - kind: KafkaMirrorMaker2 - listKind: KafkaMirrorMaker2List - singular: kafkamirrormaker2 - plural: kafkamirrormaker2s - shortNames: - - kmm2 - categories: - - strimzi - scope: Namespaced - conversion: - strategy: None - versions: - - name: v1beta2 - served: true - storage: true - subresources: - status: {} - scale: - specReplicasPath: .spec.replicas - statusReplicasPath: .status.replicas - labelSelectorPath: .status.labelSelector - additionalPrinterColumns: - - name: Desired replicas - description: The desired number of Kafka MirrorMaker 2.0 replicas - jsonPath: .spec.replicas - type: integer - - name: Ready - description: The state of the custom resource - jsonPath: '.status.conditions[?(@.type=="Ready")].status' - type: string - schema: - openAPIV3Schema: - type: object - properties: - spec: - type: object - properties: - version: - type: string - description: >- - The Kafka Connect version. Defaults to - {DefaultKafkaVersion}. Consult the user documentation to - understand the process required to upgrade or downgrade the - version. - replicas: - type: integer - description: The number of pods in the Kafka Connect group. - image: - type: string - description: The docker image for the pods. - connectCluster: - type: string - description: >- - The cluster alias used for Kafka Connect. The alias must - match a cluster in the list at `spec.clusters`. - clusters: - type: array - items: - type: object - properties: - alias: - type: string - pattern: '^[a-zA-Z0-9\._\-]{1,100}$' - description: Alias used to reference the Kafka cluster. - bootstrapServers: - type: string - description: >- - A comma-separated list of `host:port` pairs for - establishing the connection to the Kafka cluster. 
- tls: - type: object - properties: - trustedCertificates: - type: array - items: - type: object - properties: - certificate: - type: string - description: >- - The name of the file certificate in the - Secret. - secretName: - type: string - description: >- - The name of the Secret containing the - certificate. - required: - - certificate - - secretName - description: Trusted certificates for TLS connection. - description: >- - TLS configuration for connecting MirrorMaker 2.0 - connectors to a cluster. - authentication: - type: object - properties: - accessToken: - type: object - properties: - key: - type: string - description: >- - The key under which the secret value is stored - in the Kubernetes Secret. - secretName: - type: string - description: >- - The name of the Kubernetes Secret containing - the secret value. - required: - - key - - secretName - description: >- - Link to Kubernetes Secret containing the access - token which was obtained from the authorization - server. - accessTokenIsJwt: - type: boolean - description: >- - Configure whether access token should be treated - as JWT. This should be set to `false` if the - authorization server returns opaque tokens. - Defaults to `true`. - certificateAndKey: - type: object - properties: - certificate: - type: string - description: >- - The name of the file certificate in the - Secret. - key: - type: string - description: The name of the private key in the Secret. - secretName: - type: string - description: >- - The name of the Secret containing the - certificate. - required: - - certificate - - key - - secretName - description: >- - Reference to the `Secret` which holds the - certificate and private key pair. - clientId: - type: string - description: >- - OAuth Client ID which the Kafka client can use to - authenticate against the OAuth server and use the - token endpoint URI. 
- clientSecret: - type: object - properties: - key: - type: string - description: >- - The key under which the secret value is stored - in the Kubernetes Secret. - secretName: - type: string - description: >- - The name of the Kubernetes Secret containing - the secret value. - required: - - key - - secretName - description: >- - Link to Kubernetes Secret containing the OAuth - client secret which the Kafka client can use to - authenticate against the OAuth server and use the - token endpoint URI. - disableTlsHostnameVerification: - type: boolean - description: >- - Enable or disable TLS hostname verification. - Default value is `false`. - maxTokenExpirySeconds: - type: integer - description: >- - Set or limit time-to-live of the access tokens to - the specified number of seconds. This should be - set if the authorization server returns opaque - tokens. - passwordSecret: - type: object - properties: - password: - type: string - description: >- - The name of the key in the Secret under which - the password is stored. - secretName: - type: string - description: >- - The name of the Secret containing the - password. - required: - - password - - secretName - description: >- - Reference to the `Secret` which holds the - password. - refreshToken: - type: object - properties: - key: - type: string - description: >- - The key under which the secret value is stored - in the Kubernetes Secret. - secretName: - type: string - description: >- - The name of the Kubernetes Secret containing - the secret value. - required: - - key - - secretName - description: >- - Link to Kubernetes Secret containing the refresh - token which can be used to obtain access token - from the authorization server. - scope: - type: string - description: >- - OAuth scope to use when authenticating against the - authorization server. Some authorization servers - require this to be set. The possible values depend - on how authorization server is configured. 
By - default `scope` is not specified when doing the - token endpoint request. - tlsTrustedCertificates: - type: array - items: - type: object - properties: - certificate: - type: string - description: >- - The name of the file certificate in the - Secret. - secretName: - type: string - description: >- - The name of the Secret containing the - certificate. - required: - - certificate - - secretName - description: >- - Trusted certificates for TLS connection to the - OAuth server. - tokenEndpointUri: - type: string - description: Authorization server token endpoint URI. - type: - type: string - enum: - - tls - - scram-sha-512 - - plain - - oauth - description: >- - Authentication type. Currently the only supported - types are `tls`, `scram-sha-512`, and `plain`. - `scram-sha-512` type uses SASL SCRAM-SHA-512 - Authentication. `plain` type uses SASL PLAIN - Authentication. `oauth` type uses SASL OAUTHBEARER - Authentication. The `tls` type uses TLS Client - Authentication. The `tls` type is supported only - over TLS connections. - username: - type: string - description: Username used for the authentication. - required: - - type - description: >- - Authentication configuration for connecting to the - cluster. - config: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - The MirrorMaker 2.0 cluster config. Properties with - the following prefixes cannot be set: ssl., sasl., - security., listeners, plugin.path, rest., - bootstrap.servers, consumer.interceptor.classes, - producer.interceptor.classes (with the exception of: - ssl.endpoint.identification.algorithm, - ssl.cipher.suites, ssl.protocol, - ssl.enabled.protocols). - required: - - alias - - bootstrapServers - description: Kafka clusters for mirroring. - mirrors: - type: array - items: - type: object - properties: - sourceCluster: - type: string - description: >- - The alias of the source cluster used by the Kafka - MirrorMaker 2.0 connectors. 
The alias must match a - cluster in the list at `spec.clusters`. - targetCluster: - type: string - description: >- - The alias of the target cluster used by the Kafka - MirrorMaker 2.0 connectors. The alias must match a - cluster in the list at `spec.clusters`. - sourceConnector: - type: object - properties: - tasksMax: - type: integer - minimum: 1 - description: >- - The maximum number of tasks for the Kafka - Connector. - config: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - The Kafka Connector configuration. The following - properties cannot be set: connector.class, - tasks.max. - pause: - type: boolean - description: >- - Whether the connector should be paused. Defaults - to false. - description: >- - The specification of the Kafka MirrorMaker 2.0 source - connector. - heartbeatConnector: - type: object - properties: - tasksMax: - type: integer - minimum: 1 - description: >- - The maximum number of tasks for the Kafka - Connector. - config: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - The Kafka Connector configuration. The following - properties cannot be set: connector.class, - tasks.max. - pause: - type: boolean - description: >- - Whether the connector should be paused. Defaults - to false. - description: >- - The specification of the Kafka MirrorMaker 2.0 - heartbeat connector. - checkpointConnector: - type: object - properties: - tasksMax: - type: integer - minimum: 1 - description: >- - The maximum number of tasks for the Kafka - Connector. - config: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - The Kafka Connector configuration. The following - properties cannot be set: connector.class, - tasks.max. - pause: - type: boolean - description: >- - Whether the connector should be paused. Defaults - to false. - description: >- - The specification of the Kafka MirrorMaker 2.0 - checkpoint connector. 
- topicsPattern: - type: string - description: >- - A regular expression matching the topics to be - mirrored, for example, "topic1\|topic2\|topic3". - Comma-separated lists are also supported. - topicsBlacklistPattern: - type: string - description: >- - A regular expression matching the topics to exclude - from mirroring. Comma-separated lists are also - supported. - groupsPattern: - type: string - description: >- - A regular expression matching the consumer groups to - be mirrored. Comma-separated lists are also supported. - groupsBlacklistPattern: - type: string - description: >- - A regular expression matching the consumer groups to - exclude from mirroring. Comma-separated lists are also - supported. - required: - - sourceCluster - - targetCluster - description: Configuration of the MirrorMaker 2.0 connectors. + description: The name of the Secret containing the certificate. + required: + - certificate + - secretName + description: Trusted certificates for TLS connection. + description: TLS configuration. + authentication: + type: object + properties: + accessToken: + type: object + properties: + key: + type: string + description: >- + The key under which the secret value is stored in + the Kubernetes Secret. + secretName: + type: string + description: >- + The name of the Kubernetes Secret containing the + secret value. + required: + - key + - secretName + description: >- + Link to Kubernetes Secret containing the access token + which was obtained from the authorization server. + accessTokenIsJwt: + type: boolean + description: >- + Configure whether access token should be treated as JWT. + This should be set to `false` if the authorization + server returns opaque tokens. Defaults to `true`. + audience: + type: string + description: >- + OAuth audience to use when authenticating against the + authorization server. Some authorization servers require + the audience to be explicitly set. The possible values + depend on how the authorization server is configured. 
By + default, `audience` is not specified when performing the + token endpoint request. + certificateAndKey: + type: object + properties: + certificate: + type: string + description: The name of the file certificate in the Secret. + key: + type: string + description: The name of the private key in the Secret. + secretName: + type: string + description: The name of the Secret containing the certificate. + required: + - certificate + - key + - secretName + description: >- + Reference to the `Secret` which holds the certificate + and private key pair. + clientId: + type: string + description: >- + OAuth Client ID which the Kafka client can use to + authenticate against the OAuth server and use the token + endpoint URI. + clientSecret: + type: object + properties: + key: + type: string + description: >- + The key under which the secret value is stored in + the Kubernetes Secret. + secretName: + type: string + description: >- + The name of the Kubernetes Secret containing the + secret value. + required: + - key + - secretName + description: >- + Link to Kubernetes Secret containing the OAuth client + secret which the Kafka client can use to authenticate + against the OAuth server and use the token endpoint URI. + connectTimeoutSeconds: + type: integer + description: >- + The connect timeout in seconds when connecting to + authorization server. If not set, the effective connect + timeout is 60 seconds. + disableTlsHostnameVerification: + type: boolean + description: >- + Enable or disable TLS hostname verification. Default + value is `false`. + enableMetrics: + type: boolean + description: >- + Enable or disable OAuth metrics. Default value is + `false`. + httpRetries: + type: integer + description: >- + The maximum number of retries to attempt if an initial + HTTP request fails. If not set, the default is to not + attempt any retries. + httpRetryPauseMs: + type: integer + description: >- + The pause to take before retrying a failed HTTP request. 
+ If not set, the default is to not pause at all but to + immediately repeat a request. + maxTokenExpirySeconds: + type: integer + description: >- + Set or limit time-to-live of the access tokens to the + specified number of seconds. This should be set if the + authorization server returns opaque tokens. + passwordSecret: + type: object + properties: + password: + type: string + description: >- + The name of the key in the Secret under which the + password is stored. + secretName: + type: string + description: The name of the Secret containing the password. + required: + - password + - secretName + description: Reference to the `Secret` which holds the password. + readTimeoutSeconds: + type: integer + description: >- + The read timeout in seconds when connecting to + authorization server. If not set, the effective read + timeout is 60 seconds. + refreshToken: + type: object + properties: + key: + type: string + description: >- + The key under which the secret value is stored in + the Kubernetes Secret. + secretName: + type: string + description: >- + The name of the Kubernetes Secret containing the + secret value. + required: + - key + - secretName + description: >- + Link to Kubernetes Secret containing the refresh token + which can be used to obtain access token from the + authorization server. + scope: + type: string + description: >- + OAuth scope to use when authenticating against the + authorization server. Some authorization servers require + this to be set. The possible values depend on how + authorization server is configured. By default `scope` + is not specified when doing the token endpoint request. + tlsTrustedCertificates: + type: array + items: + type: object + properties: + certificate: + type: string + description: The name of the file certificate in the Secret. + secretName: + type: string + description: The name of the Secret containing the certificate. 
+ required: + - certificate + - secretName + description: >- + Trusted certificates for TLS connection to the OAuth + server. + tokenEndpointUri: + type: string + description: Authorization server token endpoint URI. + type: + type: string + enum: + - tls + - scram-sha-256 + - scram-sha-512 + - plain + - oauth + description: >- + Authentication type. Currently the supported types are + `tls`, `scram-sha-256`, `scram-sha-512`, `plain`, and + 'oauth'. `scram-sha-256` and `scram-sha-512` types use + SASL SCRAM-SHA-256 and SASL SCRAM-SHA-512 + Authentication, respectively. `plain` type uses SASL + PLAIN Authentication. `oauth` type uses SASL OAUTHBEARER + Authentication. The `tls` type uses TLS Client + Authentication. The `tls` type is supported only over + TLS connections. + username: + type: string + description: Username used for the authentication. + required: + - type + description: Authentication configuration for Kafka Connect. + config: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + The Kafka Connect configuration. Properties with the + following prefixes cannot be set: ssl., sasl., security., + listeners, plugin.path, rest., bootstrap.servers, + consumer.interceptor.classes, producer.interceptor.classes + (with the exception of: + ssl.endpoint.identification.algorithm, ssl.cipher.suites, + ssl.protocol, ssl.enabled.protocols). resources: type: object properties: @@ -8506,11 +567,11 @@ spec: description: A map of -XX options to the JVM. '-Xms': type: string - pattern: '[0-9]+[mMgG]?' + pattern: '^[0-9]+[mMgG]?$' description: '-Xms option to to the JVM.' '-Xmx': type: string - pattern: '[0-9]+[mMgG]?' + pattern: '^[0-9]+[mMgG]?$' description: '-Xmx option to to the JVM.' gcLoggingEnabled: type: boolean @@ -8586,6 +647,27 @@ spec: required: - type description: Logging configuration for Kafka Connect. 
+ clientRackInitImage: + type: string + description: >- + The image of the init container used for initializing the + `client.rack`. + rack: + type: object + properties: + topologyKey: + type: string + example: topology.kubernetes.io/zone + description: >- + A key that matches labels assigned to the Kubernetes + cluster nodes. The value of the label is used to set a + broker's `broker.rack` config, and the `client.rack` + config for Kafka Connect or MirrorMaker 2.0. + required: + - topologyKey + description: >- + Configuration of the node label which will be used as the + `client.rack` consumer configuration. tracing: type: object properties: @@ -8593,9 +675,12 @@ spec: type: string enum: - jaeger + - opentelemetry description: >- Type of the tracing used. Currently the only supported - type is `jaeger` for Jaeger tracing. + types are `jaeger` for OpenTracing (Jaeger) tracing and + `opentelemetry` for OpenTelemetry tracing. The + OpenTracing (Jaeger) tracing is deprecated. required: - type description: The configuration of tracing in Kafka Connect. @@ -8631,10 +716,35 @@ spec: - RollingUpdate - Recreate description: >- - DeploymentStrategy which will be used for this - Deployment. Valid values are `RollingUpdate` and - `Recreate`. Defaults to `RollingUpdate`. + Pod replacement strategy for deployment + configuration changes. Valid values are + `RollingUpdate` and `Recreate`. Defaults to + `RollingUpdate`. description: Template for Kafka Connect `Deployment`. + podSet: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can be + applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. 
Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + description: Metadata applied to the resource. + description: Template for Kafka Connect `StrimziPodSet` resource. pod: type: object properties: @@ -8724,6 +834,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string description: >- @@ -8850,6 +962,25 @@ spec: matchLabels: x-kubernetes-preserve-unknown-fields: true type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object namespaces: type: array items: @@ -8882,6 +1013,25 @@ spec: matchLabels: x-kubernetes-preserve-unknown-fields: true type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object namespaces: type: array items: @@ -8918,6 +1068,25 @@ spec: matchLabels: x-kubernetes-preserve-unknown-fields: true type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object namespaces: type: array items: @@ -8950,6 +1119,25 @@ spec: matchLabels: x-kubernetes-preserve-unknown-fields: true type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + 
matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object namespaces: type: array items: @@ -9000,6 +1188,13 @@ spec: The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified. + tmpDirSizeLimit: + type: string + pattern: '^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$' + description: >- + Defines the total amount (for example `1Gi`) of + local storage required for temporary EmptyDir volume + (`/tmp`). Default value is `5Mi`. enableServiceLinks: type: boolean description: >- @@ -9029,8 +1224,18 @@ spec: matchLabels: x-kubernetes-preserve-unknown-fields: true type: object + matchLabelKeys: + type: array + items: + type: string maxSkew: type: integer + minDomains: + type: integer + nodeAffinityPolicy: + type: string + nodeTaintsPolicy: + type: string topologyKey: type: string whenUnsatisfiable: @@ -9092,7 +1297,7 @@ spec: value based on the `ipFamilyPolicy` setting. Available on Kubernetes 1.20 and newer. description: Template for Kafka Connect API `Service`. - buildConfig: + headlessService: type: object properties: metadata: @@ -9101,25 +1306,129 @@ spec: labels: x-kubernetes-preserve-unknown-fields: true type: object - description: >- - Labels added to the resource template. Can be - applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true + description: >- + Labels added to the resource template. Can be + applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + description: Metadata applied to the resource. 
+ ipFamilyPolicy: + type: string + enum: + - SingleStack + - PreferDualStack + - RequireDualStack + description: >- + Specifies the IP Family Policy used by the service. + Available options are `SingleStack`, + `PreferDualStack` and `RequireDualStack`. + `SingleStack` is for a single IP family. + `PreferDualStack` is for two IP families on + dual-stack configured clusters or a single IP family + on single-stack clusters. `RequireDualStack` fails + unless there are two IP families on dual-stack + configured clusters. If unspecified, Kubernetes will + choose the default value based on the service type. + Available on Kubernetes 1.20 and newer. + ipFamilies: + type: array + items: + type: string + enum: + - IPv4 + - IPv6 + description: >- + Specifies the IP Families used by the service. + Available options are `IPv4` and `IPv6. If + unspecified, Kubernetes will choose the default + value based on the `ipFamilyPolicy` setting. + Available on Kubernetes 1.20 and newer. + description: Template for Kafka Connect headless `Service`. + connectContainer: + type: object + properties: + env: + type: array + items: + type: object + properties: + name: + type: string + description: The environment variable key. + value: + type: string + description: The environment variable value. + description: >- + Environment variables which should be applied to the + container. 
+ securityContext: + type: object + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + type: object + properties: + add: + type: array + items: + type: string + drop: + type: array + items: + type: string + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: type: object - description: >- - Annotations added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - description: Metadata applied to the resource. - description: >- - Template for the Kafka Connect BuildConfig used to build - new container images. The BuildConfig is used only on - OpenShift. - buildContainer: + properties: + localhostProfile: + type: string + type: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Security context for the container. + description: Template for the Kafka Connect container. + initContainer: type: object properties: env: @@ -9189,12 +1498,96 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string description: Security context for the container. - description: >- - Template for the Kafka Connect Build container. The - build container is used only on Kubernetes. + description: Template for the Kafka init container. + podDisruptionBudget: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. 
Can be + applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + description: >- + Metadata to apply to the + `PodDisruptionBudgetTemplate` resource. + maxUnavailable: + type: integer + minimum: 0 + description: >- + Maximum number of unavailable pods to allow + automatic Pod eviction. A Pod eviction is allowed + when the `maxUnavailable` number of pods or fewer + are unavailable after the eviction. Setting this + value to 0 prevents all voluntary evictions, so the + pods must be evicted manually. Defaults to 1. + description: Template for Kafka Connect `PodDisruptionBudget`. + serviceAccount: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can be + applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + description: Metadata applied to the resource. + description: Template for the Kafka Connect service account. + clusterRoleBinding: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can be + applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. 
+ annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + description: Metadata applied to the resource. + description: Template for the Kafka Connect ClusterRoleBinding. buildPod: type: object properties: @@ -9284,6 +1677,8 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string description: >- @@ -9410,6 +1805,25 @@ spec: matchLabels: x-kubernetes-preserve-unknown-fields: true type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object namespaces: type: array items: @@ -9442,6 +1856,25 @@ spec: matchLabels: x-kubernetes-preserve-unknown-fields: true type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object namespaces: type: array items: @@ -9478,6 +1911,25 @@ spec: matchLabels: x-kubernetes-preserve-unknown-fields: true type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object namespaces: type: array items: @@ -9510,6 +1962,25 @@ spec: matchLabels: x-kubernetes-preserve-unknown-fields: true type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array 
+ items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object namespaces: type: array items: @@ -9560,6 +2031,13 @@ spec: The pod's HostAliases. HostAliases is an optional list of hosts and IPs that will be injected into the Pod's hosts file if specified. + tmpDirSizeLimit: + type: string + pattern: '^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$' + description: >- + Defines the total amount (for example `1Gi`) of + local storage required for temporary EmptyDir volume + (`/tmp`). Default value is `5Mi`. enableServiceLinks: type: boolean description: >- @@ -9589,41 +2067,27 @@ spec: matchLabels: x-kubernetes-preserve-unknown-fields: true type: object + matchLabelKeys: + type: array + items: + type: string maxSkew: type: integer - topologyKey: - type: string - whenUnsatisfiable: - type: string - description: The pod's topology spread constraints. - description: >- - Template for Kafka Connect Build `Pods`. The build pod - is used only on Kubernetes. - clusterRoleBinding: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can be - applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - description: Metadata applied to the resource. - description: Template for the Kafka Connect ClusterRoleBinding. 
- connectContainer: + minDomains: + type: integer + nodeAffinityPolicy: + type: string + nodeTaintsPolicy: + type: string + topologyKey: + type: string + whenUnsatisfiable: + type: string + description: The pod's topology spread constraints. + description: >- + Template for Kafka Connect Build `Pods`. The build pod + is used only on Kubernetes. + buildContainer: type: object properties: env: @@ -9693,85 +2157,73 @@ spec: type: string gmsaCredentialSpecName: type: string + hostProcess: + type: boolean runAsUserName: type: string description: Security context for the container. - description: Template for the Kafka Connect container. - initContainer: + description: >- + Template for the Kafka Connect Build container. The + build container is used only on Kubernetes. + buildConfig: type: object properties: - env: - type: array - items: - type: object - properties: - name: - type: string - description: The environment variable key. - value: - type: string - description: The environment variable value. - description: >- - Environment variables which should be applied to the - container. - securityContext: + metadata: type: object properties: - allowPrivilegeEscalation: - type: boolean - capabilities: + labels: + x-kubernetes-preserve-unknown-fields: true type: object - properties: - add: - type: array - items: - type: string - drop: - type: array - items: - type: string - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - type: integer - runAsNonRoot: - type: boolean - runAsUser: - type: integer - seLinuxOptions: + description: >- + Labels added to the resource template. Can be + applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. 
+ annotations: + x-kubernetes-preserve-unknown-fields: true type: object - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - seccompProfile: + description: >- + Annotations added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + description: >- + Metadata to apply to the + `PodDisruptionBudgetTemplate` resource. + pullSecret: + type: string + description: >- + Container Registry Secret with the credentials for + pulling the base image. + description: >- + Template for the Kafka Connect BuildConfig used to build + new container images. The BuildConfig is used only on + OpenShift. + buildServiceAccount: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true type: object - properties: - localhostProfile: - type: string - type: - type: string - windowsOptions: + description: >- + Labels added to the resource template. Can be + applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true type: object - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - description: Security context for the container. - description: Template for the Kafka init container. - podDisruptionBudget: + description: >- + Annotations added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + description: Metadata applied to the resource. + description: Template for the Kafka Connect Build service account. + jmxSecret: type: object properties: metadata: @@ -9793,24 +2245,14 @@ spec: be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. 
- description: >- - Metadata to apply to the - `PodDistruptionBugetTemplate` resource. - maxUnavailable: - type: integer - minimum: 0 - description: >- - Maximum number of unavailable pods to allow - automatic Pod eviction. A Pod eviction is allowed - when the `maxUnavailable` number of pods or fewer - are unavailable after the eviction. Setting this - value to 0 prevents all voluntary evictions, so the - pods must be evicted manually. Defaults to 1. - description: Template for Kafka Connect `PodDisruptionBudget`. + description: Metadata applied to the resource. + description: >- + Template for Secret of the Kafka Connect Cluster JMX + authentication. description: >- - Template for Kafka Connect and Kafka Connect S2I resources. - The template allows users to specify how the `Deployment`, - `Pods` and `Service` are generated. + Template for Kafka Connect and Kafka Mirror Maker 2 + resources. The template allows users to specify how the + `Deployment`, `Pods` and `Service` are generated. externalConfiguration: type: object properties: @@ -9859,7 +2301,7 @@ spec: - name - valueFrom description: >- - Allows to pass data from Secret or ConfigMap to the + Makes data from a Secret or ConfigMap available in the Kafka Connect pods as environment variables. volumes: type: array @@ -9915,241 +2357,295 @@ spec: secretName: type: string description: >- - Reference to a key in a Secret. Exactly one Secret - or ConfigMap has to be specified. + Reference to a key in a Secret. Exactly one Secret + or ConfigMap has to be specified. + required: + - name + description: >- + Makes data from a Secret or ConfigMap available in the + Kafka Connect pods as volumes. + description: >- + Pass data from Secrets or ConfigMaps to the Kafka Connect + pods and use them to configure connectors. 
+ build: + type: object + properties: + output: + type: object + properties: + additionalKanikoOptions: + type: array + items: + type: string + description: >- + Configures additional options which will be passed + to the Kaniko executor when building the new Connect + image. Allowed options are: --customPlatform, + --insecure, --insecure-pull, --insecure-registry, + --log-format, --log-timestamp, --registry-mirror, + --reproducible, --single-snapshot, + --skip-tls-verify, --skip-tls-verify-pull, + --skip-tls-verify-registry, --verbosity, + --snapshotMode, --use-new-run. These options will be + used only on Kubernetes where the Kaniko executor is + used. They will be ignored on OpenShift. The options + are described in the + link:https://github.com/GoogleContainerTools/kaniko[Kaniko + GitHub repository^]. Changing this field does not + trigger new build of the Kafka Connect image. + image: + type: string + description: The name of the image which will be built. Required. + pushSecret: + type: string + description: >- + Container Registry Secret with the credentials for + pushing the newly built image. + type: + type: string + enum: + - docker + - imagestream + description: >- + Output type. Must be either `docker` for pushing the + newly build image to Docker compatible registry or + `imagestream` for pushing the image to OpenShift + ImageStream. Required. + required: + - image + - type + description: >- + Configures where should the newly built image be stored. + Required. + resources: + type: object + properties: + limits: + x-kubernetes-preserve-unknown-fields: true + type: object + requests: + x-kubernetes-preserve-unknown-fields: true + type: object + description: CPU and memory resources to reserve for the build. + plugins: + type: array + items: + type: object + properties: + name: + type: string + pattern: '^[a-z0-9][-_a-z0-9]*[a-z0-9]$' + description: >- + The unique name of the connector plugin. 
Will be + used to generate the path where the connector + artifacts will be stored. The name has to be + unique within the KafkaConnect resource. The name + has to follow the following pattern: + `^[a-z][-_a-z0-9]*[a-z]$`. Required. + artifacts: + type: array + items: + type: object + properties: + artifact: + type: string + description: >- + Maven artifact id. Applicable to the `maven` + artifact type only. + fileName: + type: string + description: >- + Name under which the artifact will be + stored. + group: + type: string + description: >- + Maven group id. Applicable to the `maven` + artifact type only. + insecure: + type: boolean + description: >- + By default, connections using TLS are + verified to check they are secure. The + server certificate used must be valid, + trusted, and contain the server name. By + setting this option to `true`, all TLS + verification is disabled and the artifact + will be downloaded, even when the server is + considered insecure. + repository: + type: string + description: >- + Maven repository to download the artifact + from. Applicable to the `maven` artifact + type only. + sha512sum: + type: string + description: >- + SHA512 checksum of the artifact. Optional. + If specified, the checksum will be verified + while building the new container. If not + specified, the downloaded artifact will not + be verified. Not applicable to the `maven` + artifact type. + type: + type: string + enum: + - jar + - tgz + - zip + - maven + - other + description: >- + Artifact type. Currently, the supported + artifact types are `tgz`, `jar`, `zip`, + `other` and `maven`. + url: + type: string + pattern: >- + ^(https?|ftp)://[-a-zA-Z0-9+&@#/%?=~_|!:,.;]*[-a-zA-Z0-9+&@#/%=~_|]$ + description: >- + URL of the artifact which will be + downloaded. Strimzi does not do any security + scanning of the downloaded artifacts. 
For + security reasons, you should first verify + the artifacts manually and configure the + checksum verification to make sure the same + artifact is used in the automated build. + Required for `jar`, `zip`, `tgz` and `other` + artifacts. Not applicable to the `maven` + artifact type. + version: + type: string + description: >- + Maven version number. Applicable to the + `maven` artifact type only. + required: + - type + description: >- + List of artifacts which belong to this connector + plugin. Required. required: - name + - artifacts description: >- - Allows to pass data from Secret or ConfigMap to the - Kafka Connect pods as volumes. + List of connector plugins which should be added to the + Kafka Connect. Required. + required: + - output + - plugins description: >- - Pass data from Secrets or ConfigMaps to the Kafka Connect - pods and use them to configure connectors. + Configures how the Connect container image should be built. + Optional. metricsConfig: type: object - properties: - type: - type: string - enum: - - jmxPrometheusExporter - description: >- - Metrics type. Only 'jmxPrometheusExporter' supported - currently. - valueFrom: - type: object - properties: - configMapKeyRef: - type: object - properties: - key: - type: string - name: - type: string - optional: - type: boolean - description: >- - Reference to the key in the ConfigMap containing the - configuration. - description: >- - ConfigMap entry where the Prometheus JMX Exporter - configuration is stored. For details of the structure of - this configuration, see the {JMXExporter}. - required: - - type - - valueFrom - description: Metrics configuration. - required: - - connectCluster - description: The specification of the Kafka MirrorMaker 2.0 cluster. - status: - type: object - properties: - conditions: - type: array - items: - type: object - properties: - type: - type: string - description: >- - The unique identifier of a condition, used to - distinguish between other conditions in the resource. 
- status: - type: string - description: >- - The status of the condition, either True, False or - Unknown. - lastTransitionTime: - type: string - description: >- - Last time the condition of a type changed from one - status to another. The required format is - 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. - reason: - type: string - description: >- - The reason for the condition's last transition (a - single word in CamelCase). - message: - type: string - description: >- - Human-readable message indicating details about the - condition's last transition. - description: List of status conditions. - observedGeneration: - type: integer - description: >- - The generation of the CRD that was last reconciled by the - operator. - url: - type: string - description: >- - The URL of the REST API endpoint for managing and monitoring - Kafka Connect connectors. - connectorPlugins: - type: array - items: - type: object - properties: - type: - type: string - description: >- - The type of the connector plugin. The available types - are `sink` and `source`. - version: - type: string - description: The version of the connector plugin. - class: - type: string - description: The class of the connector plugin. - description: >- - The list of connector plugins available in this Kafka - Connect deployment. - connectors: - type: array - items: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - List of MirrorMaker 2.0 connector statuses, as reported by - the Kafka Connect REST API. - labelSelector: - type: string - description: Label selector for pods providing this resource. - replicas: - type: integer - description: >- - The current number of pods being used to provide this - resource. - description: The status of the Kafka MirrorMaker 2.0 cluster. 
- ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: strimzi-entity-operator - labels: - app: strimzi -rules: - - apiGroups: - - kafka.strimzi.io - resources: - - kafkatopics - - kafkatopics/status - - kafkausers - - kafkausers/status - verbs: - - get - - list - - watch - - create - - patch - - update - - delete - - apiGroups: - - '' - resources: - - events - verbs: - - create - - apiGroups: - - '' - resources: - - secrets - verbs: - - get - - list - - watch - - create - - delete - - patch - - update - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: strimzi-cluster-operator-global - labels: - app: strimzi -rules: - - apiGroups: - - rbac.authorization.k8s.io - resources: - - clusterrolebindings - verbs: - - get - - list - - watch - - create - - delete - - patch - - update - - apiGroups: - - storage.k8s.io - resources: - - storageclasses - verbs: - - get - - apiGroups: - - '' - resources: - - nodes - verbs: - - list - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: strimzi-cluster-operator-kafka-broker-delegation - labels: - app: strimzi -subjects: - - kind: ServiceAccount - name: strimzi-cluster-operator - namespace: private -roleRef: - kind: ClusterRole - name: strimzi-kafka-broker - apiGroup: rbac.authorization.k8s.io - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: strimzi-cluster-operator - labels: - app: strimzi - namespace: private -subjects: - - kind: ServiceAccount - name: strimzi-cluster-operator - namespace: private -roleRef: - kind: ClusterRole - name: strimzi-cluster-operator-namespaced - apiGroup: rbac.authorization.k8s.io + properties: + type: + type: string + enum: + - jmxPrometheusExporter + description: >- + Metrics type. Only 'jmxPrometheusExporter' supported + currently. 
+ valueFrom: + type: object + properties: + configMapKeyRef: + type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean + description: >- + Reference to the key in the ConfigMap containing the + configuration. + description: >- + ConfigMap entry where the Prometheus JMX Exporter + configuration is stored. For details of the structure of + this configuration, see the {JMXExporter}. + required: + - type + - valueFrom + description: Metrics configuration. + required: + - bootstrapServers + description: The specification of the Kafka Connect cluster. + status: + type: object + properties: + conditions: + type: array + items: + type: object + properties: + type: + type: string + description: >- + The unique identifier of a condition, used to + distinguish between other conditions in the resource. + status: + type: string + description: >- + The status of the condition, either True, False or + Unknown. + lastTransitionTime: + type: string + description: >- + Last time the condition of a type changed from one + status to another. The required format is + 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. + reason: + type: string + description: >- + The reason for the condition's last transition (a + single word in CamelCase). + message: + type: string + description: >- + Human-readable message indicating details about the + condition's last transition. + description: List of status conditions. + observedGeneration: + type: integer + description: >- + The generation of the CRD that was last reconciled by the + operator. + url: + type: string + description: >- + The URL of the REST API endpoint for managing and monitoring + Kafka Connect connectors. + connectorPlugins: + type: array + items: + type: object + properties: + type: + type: string + description: >- + The type of the connector plugin. The available types + are `sink` and `source`. + version: + type: string + description: The version of the connector plugin. 
+ class: + type: string + description: The class of the connector plugin. + description: >- + The list of connector plugins available in this Kafka + Connect deployment. + labelSelector: + type: string + description: Label selector for pods providing this resource. + replicas: + type: integer + description: >- + The current number of pods being used to provide this + resource. + description: The status of the Kafka Connect cluster. --- apiVersion: rbac.authorization.k8s.io/v1 @@ -10201,50 +2697,6 @@ rules: - delete - patch - update - - apiGroups: - - kafka.strimzi.io - resources: - - kafkas - - kafkas/status - - kafkaconnects - - kafkaconnects/status - - kafkaconnects2is - - kafkaconnects2is/status - - kafkaconnectors - - kafkaconnectors/status - - kafkamirrormakers - - kafkamirrormakers/status - - kafkabridges - - kafkabridges/status - - kafkamirrormaker2s - - kafkamirrormaker2s/status - - kafkarebalances - - kafkarebalances/status - verbs: - - get - - list - - watch - - create - - delete - - patch - - update - - apiGroups: - - extensions - resources: - - deployments - - deployments/scale - - replicasets - - replicationcontrollers - - networkpolicies - - ingresses - verbs: - - get - - list - - watch - - create - - delete - - patch - - update - apiGroups: - apps resources: @@ -10263,25 +2715,11 @@ rules: - update - apiGroups: - '' + - events.k8s.io resources: - events verbs: - create - - apiGroups: - - apps.openshift.io - resources: - - deploymentconfigs - - deploymentconfigs/scale - - deploymentconfigs/status - - deploymentconfigs/finalizers - verbs: - - get - - list - - watch - - create - - delete - - patch - - update - apiGroups: - build.openshift.io resources: @@ -10296,19 +2734,6 @@ rules: - delete - patch - update - - apiGroups: - - image.openshift.io - resources: - - imagestreams - - imagestreams/status - verbs: - - get - - list - - watch - - create - - delete - - patch - - update - apiGroups: - networking.k8s.io resources: @@ -10336,117 +2761,211 @@ rules: 
- patch - update - apiGroups: - - policy + - image.openshift.io resources: - - poddisruptionbudgets + - imagestreams verbs: - get - - list - - watch - - create - - delete - - patch - - update - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: strimzi-topic-operator - labels: - app: strimzi -rules: - apiGroups: - - kafka.strimzi.io + - policy resources: - - kafkatopics + - poddisruptionbudgets verbs: - get - list - watch - create + - delete - patch - update - - delete - - apiGroups: - - '' - resources: - - events - verbs: - - create --- apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding +kind: RoleBinding metadata: - name: strimzi-cluster-operator-kafka-client-delegation + name: strimzi-cluster-operator-watched labels: app: strimzi + namespace: private subjects: - kind: ServiceAccount name: strimzi-cluster-operator namespace: private roleRef: kind: ClusterRole - name: strimzi-kafka-client + name: strimzi-cluster-operator-watched apiGroup: rbac.authorization.k8s.io --- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: strimzi-kafka-client - labels: - app: strimzi -rules: - - apiGroups: - - '' - resources: - - nodes - verbs: - - get - ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: strimzi-cluster-operator - labels: - app: strimzi - namespace: private - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition metadata: - name: strimzi-kafka-broker + name: kafkaconnectors.kafka.strimzi.io labels: app: strimzi -rules: - - apiGroups: - - '' - resources: - - nodes - verbs: - - get + strimzi.io/crd-install: 'true' +spec: + group: kafka.strimzi.io + names: + kind: KafkaConnector + listKind: KafkaConnectorList + singular: kafkaconnector + plural: kafkaconnectors + shortNames: + - kctr + categories: + - strimzi + scope: Namespaced + conversion: + strategy: None + versions: + - name: v1beta2 + served: true 
+ storage: true + subresources: + status: {} + scale: + specReplicasPath: .spec.tasksMax + statusReplicasPath: .status.tasksMax + additionalPrinterColumns: + - name: Cluster + description: The name of the Kafka Connect cluster this connector belongs to + jsonPath: .metadata.labels.strimzi\.io/cluster + type: string + - name: Connector class + description: The class used by this connector + jsonPath: .spec.class + type: string + - name: Max Tasks + description: Maximum number of tasks + jsonPath: .spec.tasksMax + type: integer + - name: Ready + description: The state of the custom resource + jsonPath: '.status.conditions[?(@.type=="Ready")].status' + type: string + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + class: + type: string + description: The Class for the Kafka Connector. + tasksMax: + type: integer + minimum: 1 + description: The maximum number of tasks for the Kafka Connector. + autoRestart: + type: object + properties: + enabled: + type: boolean + description: >- + Whether automatic restart for failed connectors and + tasks should be enabled or disabled. + description: Automatic restart of connector and tasks configuration. + config: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + The Kafka Connector configuration. The following properties + cannot be set: connector.class, tasks.max. + pause: + type: boolean + description: Whether the connector should be paused. Defaults to false. + description: The specification of the Kafka Connector. + status: + type: object + properties: + conditions: + type: array + items: + type: object + properties: + type: + type: string + description: >- + The unique identifier of a condition, used to + distinguish between other conditions in the resource. + status: + type: string + description: >- + The status of the condition, either True, False or + Unknown. 
+ lastTransitionTime: + type: string + description: >- + Last time the condition of a type changed from one + status to another. The required format is + 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. + reason: + type: string + description: >- + The reason for the condition's last transition (a + single word in CamelCase). + message: + type: string + description: >- + Human-readable message indicating details about the + condition's last transition. + description: List of status conditions. + observedGeneration: + type: integer + description: >- + The generation of the CRD that was last reconciled by the + operator. + autoRestart: + type: object + properties: + count: + type: integer + description: The number of times the connector or task is restarted. + connectorName: + type: string + description: The name of the connector being restarted. + lastRestartTimestamp: + type: string + description: >- + The last time the automatic restart was attempted. The + required format is 'yyyy-MM-ddTHH:mm:ssZ' in the UTC + time zone. + description: The auto restart status. + connectorStatus: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + The connector status, as reported by the Kafka Connect REST + API. + tasksMax: + type: integer + description: The maximum number of tasks for the Kafka Connector. + topics: + type: array + items: + type: string + description: The list of topics used by the Kafka Connector. + description: The status of the Kafka Connector. 
--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: kafkatopics.kafka.strimzi.io + name: kafkabridges.kafka.strimzi.io labels: app: strimzi strimzi.io/crd-install: 'true' spec: group: kafka.strimzi.io names: - kind: KafkaTopic - listKind: KafkaTopicList - singular: kafkatopic - plural: kafkatopics + kind: KafkaBridge + listKind: KafkaBridgeList + singular: kafkabridge + plural: kafkabridges shortNames: - - kt + - kb categories: - strimzi scope: Namespaced @@ -10458,19 +2977,20 @@ spec: storage: true subresources: status: {} + scale: + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + labelSelectorPath: .status.labelSelector additionalPrinterColumns: - - name: Cluster - description: The name of the Kafka cluster this topic belongs to - jsonPath: .metadata.labels.strimzi\.io/cluster - type: string - - name: Partitions - description: The desired number of partitions in the topic - jsonPath: .spec.partitions - type: integer - - name: Replication factor - description: The desired number of replicas of each partition + - name: Desired replicas + description: The desired number of Kafka Bridge replicas jsonPath: .spec.replicas type: integer + - name: Bootstrap Servers + description: The boostrap servers + jsonPath: .spec.bootstrapServers + type: string + priority: 1 - name: Ready description: The state of the custom resource jsonPath: '.status.conditions[?(@.type=="Ready")].status' @@ -10482,36 +3002,1335 @@ spec: spec: type: object properties: - partitions: + replicas: type: integer - minimum: 1 + minimum: 0 + description: The number of pods in the `Deployment`. + image: + type: string + description: The docker image for the pods. + bootstrapServers: + type: string + description: >- + A list of host:port pairs for establishing the initial + connection to the Kafka cluster. 
+ tls: + type: object + properties: + trustedCertificates: + type: array + items: + type: object + properties: + certificate: + type: string + description: The name of the file certificate in the Secret. + secretName: + type: string + description: The name of the Secret containing the certificate. + required: + - certificate + - secretName + description: Trusted certificates for TLS connection. + description: >- + TLS configuration for connecting Kafka Bridge to the + cluster. + authentication: + type: object + properties: + accessToken: + type: object + properties: + key: + type: string + description: >- + The key under which the secret value is stored in + the Kubernetes Secret. + secretName: + type: string + description: >- + The name of the Kubernetes Secret containing the + secret value. + required: + - key + - secretName + description: >- + Link to Kubernetes Secret containing the access token + which was obtained from the authorization server. + accessTokenIsJwt: + type: boolean + description: >- + Configure whether access token should be treated as JWT. + This should be set to `false` if the authorization + server returns opaque tokens. Defaults to `true`. + audience: + type: string + description: >- + OAuth audience to use when authenticating against the + authorization server. Some authorization servers require + the audience to be explicitly set. The possible values + depend on how the authorization server is configured. By + default, `audience` is not specified when performing the + token endpoint request. + certificateAndKey: + type: object + properties: + certificate: + type: string + description: The name of the file certificate in the Secret. + key: + type: string + description: The name of the private key in the Secret. + secretName: + type: string + description: The name of the Secret containing the certificate. 
+ required: + - certificate + - key + - secretName + description: >- + Reference to the `Secret` which holds the certificate + and private key pair. + clientId: + type: string + description: >- + OAuth Client ID which the Kafka client can use to + authenticate against the OAuth server and use the token + endpoint URI. + clientSecret: + type: object + properties: + key: + type: string + description: >- + The key under which the secret value is stored in + the Kubernetes Secret. + secretName: + type: string + description: >- + The name of the Kubernetes Secret containing the + secret value. + required: + - key + - secretName + description: >- + Link to Kubernetes Secret containing the OAuth client + secret which the Kafka client can use to authenticate + against the OAuth server and use the token endpoint URI. + connectTimeoutSeconds: + type: integer + description: >- + The connect timeout in seconds when connecting to + authorization server. If not set, the effective connect + timeout is 60 seconds. + disableTlsHostnameVerification: + type: boolean + description: >- + Enable or disable TLS hostname verification. Default + value is `false`. + enableMetrics: + type: boolean + description: >- + Enable or disable OAuth metrics. Default value is + `false`. + httpRetries: + type: integer + description: >- + The maximum number of retries to attempt if an initial + HTTP request fails. If not set, the default is to not + attempt any retries. + httpRetryPauseMs: + type: integer + description: >- + The pause to take before retrying a failed HTTP request. + If not set, the default is to not pause at all but to + immediately repeat a request. + maxTokenExpirySeconds: + type: integer + description: >- + Set or limit time-to-live of the access tokens to the + specified number of seconds. This should be set if the + authorization server returns opaque tokens. 
+ passwordSecret: + type: object + properties: + password: + type: string + description: >- + The name of the key in the Secret under which the + password is stored. + secretName: + type: string + description: The name of the Secret containing the password. + required: + - password + - secretName + description: Reference to the `Secret` which holds the password. + readTimeoutSeconds: + type: integer + description: >- + The read timeout in seconds when connecting to + authorization server. If not set, the effective read + timeout is 60 seconds. + refreshToken: + type: object + properties: + key: + type: string + description: >- + The key under which the secret value is stored in + the Kubernetes Secret. + secretName: + type: string + description: >- + The name of the Kubernetes Secret containing the + secret value. + required: + - key + - secretName + description: >- + Link to Kubernetes Secret containing the refresh token + which can be used to obtain access token from the + authorization server. + scope: + type: string + description: >- + OAuth scope to use when authenticating against the + authorization server. Some authorization servers require + this to be set. The possible values depend on how + authorization server is configured. By default `scope` + is not specified when doing the token endpoint request. + tlsTrustedCertificates: + type: array + items: + type: object + properties: + certificate: + type: string + description: The name of the file certificate in the Secret. + secretName: + type: string + description: The name of the Secret containing the certificate. + required: + - certificate + - secretName + description: >- + Trusted certificates for TLS connection to the OAuth + server. + tokenEndpointUri: + type: string + description: Authorization server token endpoint URI. + type: + type: string + enum: + - tls + - scram-sha-256 + - scram-sha-512 + - plain + - oauth + description: >- + Authentication type. 
Currently the supported types are + `tls`, `scram-sha-256`, `scram-sha-512`, `plain`, and + 'oauth'. `scram-sha-256` and `scram-sha-512` types use + SASL SCRAM-SHA-256 and SASL SCRAM-SHA-512 + Authentication, respectively. `plain` type uses SASL + PLAIN Authentication. `oauth` type uses SASL OAUTHBEARER + Authentication. The `tls` type uses TLS Client + Authentication. The `tls` type is supported only over + TLS connections. + username: + type: string + description: Username used for the authentication. + required: + - type + description: Authentication configuration for connecting to the cluster. + http: + type: object + properties: + port: + type: integer + minimum: 1023 + description: The port which is the server listening on. + cors: + type: object + properties: + allowedOrigins: + type: array + items: + type: string + description: >- + List of allowed origins. Java regular expressions + can be used. + allowedMethods: + type: array + items: + type: string + description: List of allowed HTTP methods. + required: + - allowedOrigins + - allowedMethods + description: CORS configuration for the HTTP Bridge. + description: The HTTP related configuration. + adminClient: + type: object + properties: + config: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + The Kafka AdminClient configuration used for AdminClient + instances created by the bridge. + description: Kafka AdminClient related configuration. + consumer: + type: object + properties: + config: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + The Kafka consumer configuration used for consumer + instances created by the bridge. Properties with the + following prefixes cannot be set: ssl., + bootstrap.servers, group.id, sasl., security. (with the + exception of: ssl.endpoint.identification.algorithm, + ssl.cipher.suites, ssl.protocol, ssl.enabled.protocols). + description: Kafka consumer related configuration. 
+ producer: + type: object + properties: + config: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + The Kafka producer configuration used for producer + instances created by the bridge. Properties with the + following prefixes cannot be set: ssl., + bootstrap.servers, sasl., security. (with the exception + of: ssl.endpoint.identification.algorithm, + ssl.cipher.suites, ssl.protocol, ssl.enabled.protocols). + description: Kafka producer related configuration. + resources: + type: object + properties: + limits: + x-kubernetes-preserve-unknown-fields: true + type: object + requests: + x-kubernetes-preserve-unknown-fields: true + type: object + description: CPU and memory resources to reserve. + jvmOptions: + type: object + properties: + '-XX': + x-kubernetes-preserve-unknown-fields: true + type: object + description: A map of -XX options to the JVM. + '-Xms': + type: string + pattern: '^[0-9]+[mMgG]?$' + description: '-Xms option to to the JVM.' + '-Xmx': + type: string + pattern: '^[0-9]+[mMgG]?$' + description: '-Xmx option to to the JVM.' + gcLoggingEnabled: + type: boolean + description: >- + Specifies whether the Garbage Collection logging is + enabled. The default is false. + javaSystemProperties: + type: array + items: + type: object + properties: + name: + type: string + description: The system property name. + value: + type: string + description: The system property value. + description: >- + A map of additional system properties which will be + passed using the `-D` option to the JVM. + description: '**Currently not supported** JVM Options for pods.' + logging: + type: object + properties: + loggers: + x-kubernetes-preserve-unknown-fields: true + type: object + description: A Map from logger name to logger level. + type: + type: string + enum: + - inline + - external + description: 'Logging type, must be either ''inline'' or ''external''.' 
+ valueFrom: + type: object + properties: + configMapKeyRef: + type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean + description: >- + Reference to the key in the ConfigMap containing the + configuration. + description: >- + `ConfigMap` entry where the logging configuration is + stored. + required: + - type + description: Logging configuration for Kafka Bridge. + clientRackInitImage: + type: string description: >- - The number of partitions the topic should have. This cannot - be decreased after topic creation. It can be increased after - topic creation, but it is important to understand the - consequences that has, especially for topics with semantic - partitioning. When absent this will default to the broker - configuration for `num.partitions`. - replicas: - type: integer - minimum: 1 - maximum: 32767 + The image of the init container used for initializing the + `client.rack`. + rack: + type: object + properties: + topologyKey: + type: string + example: topology.kubernetes.io/zone + description: >- + A key that matches labels assigned to the Kubernetes + cluster nodes. The value of the label is used to set a + broker's `broker.rack` config, and the `client.rack` + config for Kafka Connect or MirrorMaker 2.0. + required: + - topologyKey + description: >- + Configuration of the node label which will be used as the + client.rack consumer configuration. + enableMetrics: + type: boolean + description: Enable the metrics for the Kafka Bridge. Default is false. + livenessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive failures for the probe to be + considered failed after having succeeded. Defaults to 3. + Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: >- + The initial delay before first the health is first + checked. Default to 15 seconds. Minimum value is 0. 
+ periodSeconds: + type: integer + minimum: 1 + description: >- + How often (in seconds) to perform the probe. Default to + 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive successes for the probe to be + considered successful after having failed. Defaults to + 1. Must be 1 for liveness. Minimum value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: >- + The timeout for each attempted health check. Default to + 5 seconds. Minimum value is 1. + description: Pod liveness checking. + readinessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive failures for the probe to be + considered failed after having succeeded. Defaults to 3. + Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: >- + The initial delay before first the health is first + checked. Default to 15 seconds. Minimum value is 0. + periodSeconds: + type: integer + minimum: 1 + description: >- + How often (in seconds) to perform the probe. Default to + 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive successes for the probe to be + considered successful after having failed. Defaults to + 1. Must be 1 for liveness. Minimum value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: >- + The timeout for each attempted health check. Default to + 5 seconds. Minimum value is 1. + description: Pod readiness checking. + template: + type: object + properties: + deployment: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can be + applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. 
+ annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + description: Metadata applied to the resource. + deploymentStrategy: + type: string + enum: + - RollingUpdate + - Recreate + description: >- + Pod replacement strategy for deployment + configuration changes. Valid values are + `RollingUpdate` and `Recreate`. Defaults to + `RollingUpdate`. + description: Template for Kafka Bridge `Deployment`. + pod: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can be + applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + description: Metadata applied to the resource. + imagePullSecrets: + type: array + items: + type: object + properties: + name: + type: string + description: >- + List of references to secrets in the same namespace + to use for pulling any of the images used by this + Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` + environment variable in Cluster Operator and the + `imagePullSecrets` option are specified, only the + `imagePullSecrets` variable is used and the + `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored. 
+ securityContext: + type: object + properties: + fsGroup: + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + supplementalGroups: + type: array + items: + type: integer + sysctls: + type: array + items: + type: object + properties: + name: + type: string + value: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: >- + Configures pod-level security attributes and common + container settings. + terminationGracePeriodSeconds: + type: integer + minimum: 0 + description: >- + The grace period is the duration in seconds after + the processes running in the pod are sent a + termination signal, and the time when the processes + are forcibly halted with a kill signal. Set this + value to longer than the expected cleanup time for + your process. Value must be a non-negative integer. + A zero value indicates delete immediately. You might + need to increase the grace period for very large + Kafka clusters, so that the Kafka brokers have + enough time to transfer their work to another broker + before they are terminated. Defaults to 30 seconds. 
+ affinity: + type: object + properties: + nodeAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + preference: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: object + properties: + nodeSelectorTerms: + type: array + items: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + podAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + 
requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + podAntiAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + 
x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + description: The pod's affinity rules. + tolerations: + type: array + items: + type: object + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + type: integer + value: + type: string + description: The pod's tolerations. + priorityClassName: + type: string + description: >- + The name of the priority class used to assign + priority to the pods. For more information about + priority classes, see {K8sPriorityClass}. + schedulerName: + type: string + description: >- + The name of the scheduler used to dispatch this + `Pod`. If not specified, the default scheduler will + be used. + hostAliases: + type: array + items: + type: object + properties: + hostnames: + type: array + items: + type: string + ip: + type: string + description: >- + The pod's HostAliases. HostAliases is an optional + list of hosts and IPs that will be injected into the + Pod's hosts file if specified. + tmpDirSizeLimit: + type: string + pattern: '^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$' + description: >- + Defines the total amount (for example `1Gi`) of + local storage required for temporary EmptyDir volume + (`/tmp`). Default value is `5Mi`. + enableServiceLinks: + type: boolean + description: >- + Indicates whether information about services should + be injected into Pod's environment variables. 
+ topologySpreadConstraints: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + matchLabelKeys: + type: array + items: + type: string + maxSkew: + type: integer + minDomains: + type: integer + nodeAffinityPolicy: + type: string + nodeTaintsPolicy: + type: string + topologyKey: + type: string + whenUnsatisfiable: + type: string + description: The pod's topology spread constraints. + description: Template for Kafka Bridge `Pods`. + apiService: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can be + applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + description: Metadata applied to the resource. + ipFamilyPolicy: + type: string + enum: + - SingleStack + - PreferDualStack + - RequireDualStack + description: >- + Specifies the IP Family Policy used by the service. + Available options are `SingleStack`, + `PreferDualStack` and `RequireDualStack`. + `SingleStack` is for a single IP family. + `PreferDualStack` is for two IP families on + dual-stack configured clusters or a single IP family + on single-stack clusters. `RequireDualStack` fails + unless there are two IP families on dual-stack + configured clusters. If unspecified, Kubernetes will + choose the default value based on the service type. + Available on Kubernetes 1.20 and newer. 
+ ipFamilies: + type: array + items: + type: string + enum: + - IPv4 + - IPv6 + description: >- + Specifies the IP Families used by the service. + Available options are `IPv4` and `IPv6. If + unspecified, Kubernetes will choose the default + value based on the `ipFamilyPolicy` setting. + Available on Kubernetes 1.20 and newer. + description: Template for Kafka Bridge API `Service`. + podDisruptionBudget: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can be + applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + description: >- + Metadata to apply to the + `PodDisruptionBudgetTemplate` resource. + maxUnavailable: + type: integer + minimum: 0 + description: >- + Maximum number of unavailable pods to allow + automatic Pod eviction. A Pod eviction is allowed + when the `maxUnavailable` number of pods or fewer + are unavailable after the eviction. Setting this + value to 0 prevents all voluntary evictions, so the + pods must be evicted manually. Defaults to 1. + description: Template for Kafka Bridge `PodDisruptionBudget`. + bridgeContainer: + type: object + properties: + env: + type: array + items: + type: object + properties: + name: + type: string + description: The environment variable key. + value: + type: string + description: The environment variable value. + description: >- + Environment variables which should be applied to the + container. 
+ securityContext: + type: object + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + type: object + properties: + add: + type: array + items: + type: string + drop: + type: array + items: + type: string + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Security context for the container. + description: Template for the Kafka Bridge container. + clusterRoleBinding: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can be + applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + description: Metadata applied to the resource. + description: Template for the Kafka Bridge ClusterRoleBinding. + serviceAccount: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can be + applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. 
+ annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + description: Metadata applied to the resource. + description: Template for the Kafka Bridge service account. + initContainer: + type: object + properties: + env: + type: array + items: + type: object + properties: + name: + type: string + description: The environment variable key. + value: + type: string + description: The environment variable value. + description: >- + Environment variables which should be applied to the + container. + securityContext: + type: object + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + type: object + properties: + add: + type: array + items: + type: string + drop: + type: array + items: + type: string + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Security context for the container. + description: Template for the Kafka Bridge init container. description: >- - The number of replicas the topic should have. When absent - this will default to the broker configuration for - `default.replication.factor`. - config: - x-kubernetes-preserve-unknown-fields: true + Template for Kafka Bridge resources. The template allows + users to specify how a `Deployment` and `Pod` is generated. 
+ tracing: type: object - description: The topic configuration. - topicName: - type: string - description: >- - The name of the topic. When absent this will default to the - metadata.name of the topic. It is recommended to not set - this unless the topic name is not a valid Kubernetes - resource name. - description: The specification of the topic. + properties: + type: + type: string + enum: + - jaeger + - opentelemetry + description: >- + Type of the tracing used. Currently the only supported + types are `jaeger` for OpenTracing (Jaeger) tracing and + `opentelemetry` for OpenTelemetry tracing. The + OpenTracing (Jaeger) tracing is deprecated. + required: + - type + description: The configuration of tracing in Kafka Bridge. + required: + - bootstrapServers + description: The specification of the Kafka Bridge. status: type: object properties: @@ -10552,131 +4371,68 @@ spec: description: >- The generation of the CRD that was last reconciled by the operator. - topicName: - type: string - description: Topic name. - description: The status of the topic. - - name: v1beta1 - served: true - storage: false - subresources: - status: {} - additionalPrinterColumns: - - name: Cluster - description: The name of the Kafka cluster this topic belongs to - jsonPath: .metadata.labels.strimzi\.io/cluster - type: string - - name: Partitions - description: The desired number of partitions in the topic - jsonPath: .spec.partitions - type: integer - - name: Replication factor - description: The desired number of replicas of each partition - jsonPath: .spec.replicas - type: integer - - name: Ready - description: The state of the custom resource - jsonPath: '.status.conditions[?(@.type=="Ready")].status' - type: string - schema: - openAPIV3Schema: - type: object - properties: - spec: - type: object - properties: - partitions: - type: integer - minimum: 1 - description: >- - The number of partitions the topic should have. This cannot - be decreased after topic creation. 
It can be increased after - topic creation, but it is important to understand the - consequences that has, especially for topics with semantic - partitioning. When absent this will default to the broker - configuration for `num.partitions`. - replicas: - type: integer - minimum: 1 - maximum: 32767 - description: >- - The number of replicas the topic should have. When absent - this will default to the broker configuration for - `default.replication.factor`. - config: - x-kubernetes-preserve-unknown-fields: true - type: object - description: The topic configuration. - topicName: + url: type: string description: >- - The name of the topic. When absent this will default to the - metadata.name of the topic. It is recommended to not set - this unless the topic name is not a valid Kubernetes - resource name. - description: The specification of the topic. - status: - type: object - properties: - conditions: - type: array - items: - type: object - properties: - type: - type: string - description: >- - The unique identifier of a condition, used to - distinguish between other conditions in the resource. - status: - type: string - description: >- - The status of the condition, either True, False or - Unknown. - lastTransitionTime: - type: string - description: >- - Last time the condition of a type changed from one - status to another. The required format is - 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. - reason: - type: string - description: >- - The reason for the condition's last transition (a - single word in CamelCase). - message: - type: string - description: >- - Human-readable message indicating details about the - condition's last transition. - description: List of status conditions. - observedGeneration: - type: integer - description: >- - The generation of the CRD that was last reconciled by the - operator. - topicName: - type: string - description: Topic name. - description: The status of the topic. 
- - name: v1alpha1 + The URL at which external client applications can access the + Kafka Bridge. + labelSelector: + type: string + description: Label selector for pods providing this resource. + replicas: + type: integer + description: >- + The current number of pods being used to provide this + resource. + description: The status of the Kafka Bridge. + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: kafkamirrormakers.kafka.strimzi.io + labels: + app: strimzi + strimzi.io/crd-install: 'true' +spec: + group: kafka.strimzi.io + names: + kind: KafkaMirrorMaker + listKind: KafkaMirrorMakerList + singular: kafkamirrormaker + plural: kafkamirrormakers + shortNames: + - kmm + categories: + - strimzi + scope: Namespaced + conversion: + strategy: None + versions: + - name: v1beta2 served: true - storage: false + storage: true subresources: status: {} + scale: + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + labelSelectorPath: .status.labelSelector additionalPrinterColumns: - - name: Cluster - description: The name of the Kafka cluster this topic belongs to - jsonPath: .metadata.labels.strimzi\.io/cluster - type: string - - name: Partitions - description: The desired number of partitions in the topic - jsonPath: .spec.partitions - type: integer - - name: Replication factor - description: The desired number of replicas of each partition + - name: Desired replicas + description: The desired number of Kafka MirrorMaker replicas jsonPath: .spec.replicas type: integer + - name: Consumer Bootstrap Servers + description: The boostrap servers for the consumer + jsonPath: .spec.consumer.bootstrapServers + type: string + priority: 1 + - name: Producer Bootstrap Servers + description: The boostrap servers for the producer + jsonPath: .spec.producer.bootstrapServers + type: string + priority: 1 - name: Ready description: The state of the custom resource jsonPath: '.status.conditions[?(@.type=="Ready")].status' @@ 
-10688,548 +4444,1261 @@ spec: spec: type: object properties: - partitions: - type: integer - minimum: 1 + version: + type: string description: >- - The number of partitions the topic should have. This cannot - be decreased after topic creation. It can be increased after - topic creation, but it is important to understand the - consequences that has, especially for topics with semantic - partitioning. When absent this will default to the broker - configuration for `num.partitions`. + The Kafka MirrorMaker version. Defaults to + {DefaultKafkaVersion}. Consult the documentation to + understand the process required to upgrade or downgrade the + version. replicas: type: integer - minimum: 1 - maximum: 32767 - description: >- - The number of replicas the topic should have. When absent - this will default to the broker configuration for - `default.replication.factor`. - config: - x-kubernetes-preserve-unknown-fields: true + minimum: 0 + description: The number of pods in the `Deployment`. + image: + type: string + description: The docker image for the pods. + consumer: + type: object + properties: + numStreams: + type: integer + minimum: 1 + description: >- + Specifies the number of consumer stream threads to + create. + offsetCommitInterval: + type: integer + description: >- + Specifies the offset auto-commit interval in ms. Default + value is 60000. + bootstrapServers: + type: string + description: >- + A list of host:port pairs for establishing the initial + connection to the Kafka cluster. + groupId: + type: string + description: >- + A unique string that identifies the consumer group this + consumer belongs to. + authentication: + type: object + properties: + accessToken: + type: object + properties: + key: + type: string + description: >- + The key under which the secret value is stored + in the Kubernetes Secret. + secretName: + type: string + description: >- + The name of the Kubernetes Secret containing the + secret value. 
+ required: + - key + - secretName + description: >- + Link to Kubernetes Secret containing the access + token which was obtained from the authorization + server. + accessTokenIsJwt: + type: boolean + description: >- + Configure whether access token should be treated as + JWT. This should be set to `false` if the + authorization server returns opaque tokens. Defaults + to `true`. + audience: + type: string + description: >- + OAuth audience to use when authenticating against + the authorization server. Some authorization servers + require the audience to be explicitly set. The + possible values depend on how the authorization + server is configured. By default, `audience` is not + specified when performing the token endpoint + request. + certificateAndKey: + type: object + properties: + certificate: + type: string + description: The name of the file certificate in the Secret. + key: + type: string + description: The name of the private key in the Secret. + secretName: + type: string + description: >- + The name of the Secret containing the + certificate. + required: + - certificate + - key + - secretName + description: >- + Reference to the `Secret` which holds the + certificate and private key pair. + clientId: + type: string + description: >- + OAuth Client ID which the Kafka client can use to + authenticate against the OAuth server and use the + token endpoint URI. + clientSecret: + type: object + properties: + key: + type: string + description: >- + The key under which the secret value is stored + in the Kubernetes Secret. + secretName: + type: string + description: >- + The name of the Kubernetes Secret containing the + secret value. + required: + - key + - secretName + description: >- + Link to Kubernetes Secret containing the OAuth + client secret which the Kafka client can use to + authenticate against the OAuth server and use the + token endpoint URI. 
+ connectTimeoutSeconds: + type: integer + description: >- + The connect timeout in seconds when connecting to + authorization server. If not set, the effective + connect timeout is 60 seconds. + disableTlsHostnameVerification: + type: boolean + description: >- + Enable or disable TLS hostname verification. Default + value is `false`. + enableMetrics: + type: boolean + description: >- + Enable or disable OAuth metrics. Default value is + `false`. + httpRetries: + type: integer + description: >- + The maximum number of retries to attempt if an + initial HTTP request fails. If not set, the default + is to not attempt any retries. + httpRetryPauseMs: + type: integer + description: >- + The pause to take before retrying a failed HTTP + request. If not set, the default is to not pause at + all but to immediately repeat a request. + maxTokenExpirySeconds: + type: integer + description: >- + Set or limit time-to-live of the access tokens to + the specified number of seconds. This should be set + if the authorization server returns opaque tokens. + passwordSecret: + type: object + properties: + password: + type: string + description: >- + The name of the key in the Secret under which + the password is stored. + secretName: + type: string + description: The name of the Secret containing the password. + required: + - password + - secretName + description: Reference to the `Secret` which holds the password. + readTimeoutSeconds: + type: integer + description: >- + The read timeout in seconds when connecting to + authorization server. If not set, the effective read + timeout is 60 seconds. + refreshToken: + type: object + properties: + key: + type: string + description: >- + The key under which the secret value is stored + in the Kubernetes Secret. + secretName: + type: string + description: >- + The name of the Kubernetes Secret containing the + secret value. 
+ required: + - key + - secretName + description: >- + Link to Kubernetes Secret containing the refresh + token which can be used to obtain access token from + the authorization server. + scope: + type: string + description: >- + OAuth scope to use when authenticating against the + authorization server. Some authorization servers + require this to be set. The possible values depend + on how authorization server is configured. By + default `scope` is not specified when doing the + token endpoint request. + tlsTrustedCertificates: + type: array + items: + type: object + properties: + certificate: + type: string + description: >- + The name of the file certificate in the + Secret. + secretName: + type: string + description: >- + The name of the Secret containing the + certificate. + required: + - certificate + - secretName + description: >- + Trusted certificates for TLS connection to the OAuth + server. + tokenEndpointUri: + type: string + description: Authorization server token endpoint URI. + type: + type: string + enum: + - tls + - scram-sha-256 + - scram-sha-512 + - plain + - oauth + description: >- + Authentication type. Currently the supported types + are `tls`, `scram-sha-256`, `scram-sha-512`, + `plain`, and 'oauth'. `scram-sha-256` and + `scram-sha-512` types use SASL SCRAM-SHA-256 and + SASL SCRAM-SHA-512 Authentication, respectively. + `plain` type uses SASL PLAIN Authentication. `oauth` + type uses SASL OAUTHBEARER Authentication. The `tls` + type uses TLS Client Authentication. The `tls` type + is supported only over TLS connections. + username: + type: string + description: Username used for the authentication. + required: + - type + description: >- + Authentication configuration for connecting to the + cluster. + config: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + The MirrorMaker consumer config. 
Properties with the + following prefixes cannot be set: ssl., + bootstrap.servers, group.id, sasl., security., + interceptor.classes (with the exception of: + ssl.endpoint.identification.algorithm, + ssl.cipher.suites, ssl.protocol, ssl.enabled.protocols). + tls: + type: object + properties: + trustedCertificates: + type: array + items: + type: object + properties: + certificate: + type: string + description: >- + The name of the file certificate in the + Secret. + secretName: + type: string + description: >- + The name of the Secret containing the + certificate. + required: + - certificate + - secretName + description: Trusted certificates for TLS connection. + description: >- + TLS configuration for connecting MirrorMaker to the + cluster. + required: + - bootstrapServers + - groupId + description: Configuration of source cluster. + producer: + type: object + properties: + bootstrapServers: + type: string + description: >- + A list of host:port pairs for establishing the initial + connection to the Kafka cluster. + abortOnSendFailure: + type: boolean + description: >- + Flag to set the MirrorMaker to exit on a failed send. + Default value is `true`. + authentication: + type: object + properties: + accessToken: + type: object + properties: + key: + type: string + description: >- + The key under which the secret value is stored + in the Kubernetes Secret. + secretName: + type: string + description: >- + The name of the Kubernetes Secret containing the + secret value. + required: + - key + - secretName + description: >- + Link to Kubernetes Secret containing the access + token which was obtained from the authorization + server. + accessTokenIsJwt: + type: boolean + description: >- + Configure whether access token should be treated as + JWT. This should be set to `false` if the + authorization server returns opaque tokens. Defaults + to `true`. + audience: + type: string + description: >- + OAuth audience to use when authenticating against + the authorization server. 
Some authorization servers + require the audience to be explicitly set. The + possible values depend on how the authorization + server is configured. By default, `audience` is not + specified when performing the token endpoint + request. + certificateAndKey: + type: object + properties: + certificate: + type: string + description: The name of the file certificate in the Secret. + key: + type: string + description: The name of the private key in the Secret. + secretName: + type: string + description: >- + The name of the Secret containing the + certificate. + required: + - certificate + - key + - secretName + description: >- + Reference to the `Secret` which holds the + certificate and private key pair. + clientId: + type: string + description: >- + OAuth Client ID which the Kafka client can use to + authenticate against the OAuth server and use the + token endpoint URI. + clientSecret: + type: object + properties: + key: + type: string + description: >- + The key under which the secret value is stored + in the Kubernetes Secret. + secretName: + type: string + description: >- + The name of the Kubernetes Secret containing the + secret value. + required: + - key + - secretName + description: >- + Link to Kubernetes Secret containing the OAuth + client secret which the Kafka client can use to + authenticate against the OAuth server and use the + token endpoint URI. + connectTimeoutSeconds: + type: integer + description: >- + The connect timeout in seconds when connecting to + authorization server. If not set, the effective + connect timeout is 60 seconds. + disableTlsHostnameVerification: + type: boolean + description: >- + Enable or disable TLS hostname verification. Default + value is `false`. + enableMetrics: + type: boolean + description: >- + Enable or disable OAuth metrics. Default value is + `false`. + httpRetries: + type: integer + description: >- + The maximum number of retries to attempt if an + initial HTTP request fails. 
If not set, the default + is to not attempt any retries. + httpRetryPauseMs: + type: integer + description: >- + The pause to take before retrying a failed HTTP + request. If not set, the default is to not pause at + all but to immediately repeat a request. + maxTokenExpirySeconds: + type: integer + description: >- + Set or limit time-to-live of the access tokens to + the specified number of seconds. This should be set + if the authorization server returns opaque tokens. + passwordSecret: + type: object + properties: + password: + type: string + description: >- + The name of the key in the Secret under which + the password is stored. + secretName: + type: string + description: The name of the Secret containing the password. + required: + - password + - secretName + description: Reference to the `Secret` which holds the password. + readTimeoutSeconds: + type: integer + description: >- + The read timeout in seconds when connecting to + authorization server. If not set, the effective read + timeout is 60 seconds. + refreshToken: + type: object + properties: + key: + type: string + description: >- + The key under which the secret value is stored + in the Kubernetes Secret. + secretName: + type: string + description: >- + The name of the Kubernetes Secret containing the + secret value. + required: + - key + - secretName + description: >- + Link to Kubernetes Secret containing the refresh + token which can be used to obtain access token from + the authorization server. + scope: + type: string + description: >- + OAuth scope to use when authenticating against the + authorization server. Some authorization servers + require this to be set. The possible values depend + on how authorization server is configured. By + default `scope` is not specified when doing the + token endpoint request. + tlsTrustedCertificates: + type: array + items: + type: object + properties: + certificate: + type: string + description: >- + The name of the file certificate in the + Secret. 
+ secretName: + type: string + description: >- + The name of the Secret containing the + certificate. + required: + - certificate + - secretName + description: >- + Trusted certificates for TLS connection to the OAuth + server. + tokenEndpointUri: + type: string + description: Authorization server token endpoint URI. + type: + type: string + enum: + - tls + - scram-sha-256 + - scram-sha-512 + - plain + - oauth + description: >- + Authentication type. Currently the supported types + are `tls`, `scram-sha-256`, `scram-sha-512`, + `plain`, and 'oauth'. `scram-sha-256` and + `scram-sha-512` types use SASL SCRAM-SHA-256 and + SASL SCRAM-SHA-512 Authentication, respectively. + `plain` type uses SASL PLAIN Authentication. `oauth` + type uses SASL OAUTHBEARER Authentication. The `tls` + type uses TLS Client Authentication. The `tls` type + is supported only over TLS connections. + username: + type: string + description: Username used for the authentication. + required: + - type + description: >- + Authentication configuration for connecting to the + cluster. + config: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + The MirrorMaker producer config. Properties with the + following prefixes cannot be set: ssl., + bootstrap.servers, sasl., security., interceptor.classes + (with the exception of: + ssl.endpoint.identification.algorithm, + ssl.cipher.suites, ssl.protocol, ssl.enabled.protocols). + tls: + type: object + properties: + trustedCertificates: + type: array + items: + type: object + properties: + certificate: + type: string + description: >- + The name of the file certificate in the + Secret. + secretName: + type: string + description: >- + The name of the Secret containing the + certificate. + required: + - certificate + - secretName + description: Trusted certificates for TLS connection. + description: >- + TLS configuration for connecting MirrorMaker to the + cluster. 
+ required: + - bootstrapServers + description: Configuration of target cluster. + resources: type: object - description: The topic configuration. - topicName: + properties: + limits: + x-kubernetes-preserve-unknown-fields: true + type: object + requests: + x-kubernetes-preserve-unknown-fields: true + type: object + description: CPU and memory resources to reserve. + whitelist: type: string description: >- - The name of the topic. When absent this will default to the - metadata.name of the topic. It is recommended to not set - this unless the topic name is not a valid Kubernetes - resource name. - description: The specification of the topic. - status: - type: object - properties: - conditions: - type: array - items: - type: object - properties: - type: - type: string - description: >- - The unique identifier of a condition, used to - distinguish between other conditions in the resource. - status: - type: string - description: >- - The status of the condition, either True, False or - Unknown. - lastTransitionTime: - type: string - description: >- - Last time the condition of a type changed from one - status to another. The required format is - 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. - reason: - type: string - description: >- - The reason for the condition's last transition (a - single word in CamelCase). - message: - type: string - description: >- - Human-readable message indicating details about the - condition's last transition. - description: List of status conditions. - observedGeneration: - type: integer - description: >- - The generation of the CRD that was last reconciled by the - operator. - topicName: - type: string - description: Topic name. - description: The status of the topic. 
- ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: kafkabridges.kafka.strimzi.io - labels: - app: strimzi - strimzi.io/crd-install: 'true' -spec: - group: kafka.strimzi.io - names: - kind: KafkaBridge - listKind: KafkaBridgeList - singular: kafkabridge - plural: kafkabridges - shortNames: - - kb - categories: - - strimzi - scope: Namespaced - conversion: - strategy: None - versions: - - name: v1beta2 - served: true - storage: true - subresources: - status: {} - scale: - specReplicasPath: .spec.replicas - statusReplicasPath: .status.replicas - labelSelectorPath: .status.labelSelector - additionalPrinterColumns: - - name: Desired replicas - description: The desired number of Kafka Bridge replicas - jsonPath: .spec.replicas - type: integer - - name: Bootstrap Servers - description: The boostrap servers - jsonPath: .spec.bootstrapServers - type: string - priority: 1 - - name: Ready - description: The state of the custom resource - jsonPath: '.status.conditions[?(@.type=="Ready")].status' - type: string - schema: - openAPIV3Schema: - type: object - properties: - spec: - type: object - properties: - replicas: - type: integer - minimum: 0 - description: The number of pods in the `Deployment`. - image: - type: string - description: The docker image for the pods. - bootstrapServers: + List of topics which are included for mirroring. This option + allows any regular expression using Java-style regular + expressions. Mirroring two topics named A and B is achieved + by using the expression `A\|B`. Or, as a special case, you + can mirror all topics using the regular expression `*`. You + can also specify multiple regular expressions separated by + commas. + include: type: string description: >- - A list of host:port pairs for establishing the initial - connection to the Kafka cluster. - tls: + List of topics which are included for mirroring. This option + allows any regular expression using Java-style regular + expressions. 
Mirroring two topics named A and B is achieved + by using the expression `A\|B`. Or, as a special case, you + can mirror all topics using the regular expression `*`. You + can also specify multiple regular expressions separated by + commas. + jvmOptions: type: object properties: - trustedCertificates: + '-XX': + x-kubernetes-preserve-unknown-fields: true + type: object + description: A map of -XX options to the JVM. + '-Xms': + type: string + pattern: '^[0-9]+[mMgG]?$' + description: '-Xms option to to the JVM.' + '-Xmx': + type: string + pattern: '^[0-9]+[mMgG]?$' + description: '-Xmx option to to the JVM.' + gcLoggingEnabled: + type: boolean + description: >- + Specifies whether the Garbage Collection logging is + enabled. The default is false. + javaSystemProperties: type: array items: type: object properties: - certificate: + name: type: string - description: The name of the file certificate in the Secret. - secretName: + description: The system property name. + value: type: string - description: The name of the Secret containing the certificate. - required: - - certificate - - secretName - description: Trusted certificates for TLS connection. - description: >- - TLS configuration for connecting Kafka Bridge to the - cluster. - authentication: - type: object - properties: - accessToken: - type: object - properties: - key: - type: string - description: >- - The key under which the secret value is stored in - the Kubernetes Secret. - secretName: - type: string - description: >- - The name of the Kubernetes Secret containing the - secret value. - required: - - key - - secretName - description: >- - Link to Kubernetes Secret containing the access token - which was obtained from the authorization server. - accessTokenIsJwt: - type: boolean - description: >- - Configure whether access token should be treated as JWT. - This should be set to `false` if the authorization - server returns opaque tokens. Defaults to `true`. 
- certificateAndKey: + description: The system property value. + description: >- + A map of additional system properties which will be + passed using the `-D` option to the JVM. + description: JVM Options for pods. + logging: + type: object + properties: + loggers: + x-kubernetes-preserve-unknown-fields: true + type: object + description: A Map from logger name to logger level. + type: + type: string + enum: + - inline + - external + description: 'Logging type, must be either ''inline'' or ''external''.' + valueFrom: type: object properties: - certificate: - type: string - description: The name of the file certificate in the Secret. - key: - type: string - description: The name of the private key in the Secret. - secretName: - type: string - description: The name of the Secret containing the certificate. - required: - - certificate - - key - - secretName + configMapKeyRef: + type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean + description: >- + Reference to the key in the ConfigMap containing the + configuration. description: >- - Reference to the `Secret` which holds the certificate - and private key pair. - clientId: + `ConfigMap` entry where the logging configuration is + stored. + required: + - type + description: Logging configuration for MirrorMaker. + metricsConfig: + type: object + properties: + type: type: string + enum: + - jmxPrometheusExporter description: >- - OAuth Client ID which the Kafka client can use to - authenticate against the OAuth server and use the token - endpoint URI. - clientSecret: + Metrics type. Only 'jmxPrometheusExporter' supported + currently. + valueFrom: type: object properties: - key: - type: string - description: >- - The key under which the secret value is stored in - the Kubernetes Secret. 
- secretName: - type: string + configMapKeyRef: + type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean description: >- - The name of the Kubernetes Secret containing the - secret value. - required: - - key - - secretName - description: >- - Link to Kubernetes Secret containing the OAuth client - secret which the Kafka client can use to authenticate - against the OAuth server and use the token endpoint URI. - disableTlsHostnameVerification: - type: boolean + Reference to the key in the ConfigMap containing the + configuration. description: >- - Enable or disable TLS hostname verification. Default - value is `false`. - maxTokenExpirySeconds: - type: integer + ConfigMap entry where the Prometheus JMX Exporter + configuration is stored. For details of the structure of + this configuration, see the {JMXExporter}. + required: + - type + - valueFrom + description: Metrics configuration. + tracing: + type: object + properties: + type: + type: string + enum: + - jaeger + - opentelemetry description: >- - Set or limit time-to-live of the access tokens to the - specified number of seconds. This should be set if the - authorization server returns opaque tokens. - passwordSecret: + Type of the tracing used. Currently the only supported + types are `jaeger` for OpenTracing (Jaeger) tracing and + `opentelemetry` for OpenTelemetry tracing. The + OpenTracing (Jaeger) tracing is deprecated. + required: + - type + description: The configuration of tracing in Kafka MirrorMaker. + template: + type: object + properties: + deployment: type: object properties: - password: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can be + applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. 
+ annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + description: Metadata applied to the resource. + deploymentStrategy: type: string + enum: + - RollingUpdate + - Recreate description: >- - The name of the key in the Secret under which the - password is stored. - secretName: - type: string - description: The name of the Secret containing the password. - required: - - password - - secretName - description: Reference to the `Secret` which holds the password. - refreshToken: + Pod replacement strategy for deployment + configuration changes. Valid values are + `RollingUpdate` and `Recreate`. Defaults to + `RollingUpdate`. + description: Template for Kafka MirrorMaker `Deployment`. + pod: type: object properties: - key: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can be + applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + description: Metadata applied to the resource. + imagePullSecrets: + type: array + items: + type: object + properties: + name: + type: string + description: >- + List of references to secrets in the same namespace + to use for pulling any of the images used by this + Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` + environment variable in Cluster Operator and the + `imagePullSecrets` option are specified, only the + `imagePullSecrets` variable is used and the + `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored. 
+ securityContext: + type: object + properties: + fsGroup: + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + supplementalGroups: + type: array + items: + type: integer + sysctls: + type: array + items: + type: object + properties: + name: + type: string + value: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: >- + Configures pod-level security attributes and common + container settings. + terminationGracePeriodSeconds: + type: integer + minimum: 0 + description: >- + The grace period is the duration in seconds after + the processes running in the pod are sent a + termination signal, and the time when the processes + are forcibly halted with a kill signal. Set this + value to longer than the expected cleanup time for + your process. Value must be a non-negative integer. + A zero value indicates delete immediately. You might + need to increase the grace period for very large + Kafka clusters, so that the Kafka brokers have + enough time to transfer their work to another broker + before they are terminated. Defaults to 30 seconds. 
+ affinity: + type: object + properties: + nodeAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + preference: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: object + properties: + nodeSelectorTerms: + type: array + items: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + podAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + 
requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + podAntiAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + 
x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + description: The pod's affinity rules. + tolerations: + type: array + items: + type: object + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + type: integer + value: + type: string + description: The pod's tolerations. + priorityClassName: type: string description: >- - The key under which the secret value is stored in - the Kubernetes Secret. - secretName: + The name of the priority class used to assign + priority to the pods. For more information about + priority classes, see {K8sPriorityClass}. + schedulerName: type: string description: >- - The name of the Kubernetes Secret containing the - secret value. - required: - - key - - secretName - description: >- - Link to Kubernetes Secret containing the refresh token - which can be used to obtain access token from the - authorization server. - scope: - type: string - description: >- - OAuth scope to use when authenticating against the - authorization server. Some authorization servers require - this to be set. The possible values depend on how - authorization server is configured. By default `scope` - is not specified when doing the token endpoint request. - tlsTrustedCertificates: - type: array - items: - type: object - properties: - certificate: - type: string - description: The name of the file certificate in the Secret. - secretName: - type: string - description: The name of the Secret containing the certificate. 
- required: - - certificate - - secretName - description: >- - Trusted certificates for TLS connection to the OAuth - server. - tokenEndpointUri: - type: string - description: Authorization server token endpoint URI. - type: - type: string - enum: - - tls - - scram-sha-512 - - plain - - oauth - description: >- - Authentication type. Currently the only supported types - are `tls`, `scram-sha-512`, and `plain`. `scram-sha-512` - type uses SASL SCRAM-SHA-512 Authentication. `plain` - type uses SASL PLAIN Authentication. `oauth` type uses - SASL OAUTHBEARER Authentication. The `tls` type uses TLS - Client Authentication. The `tls` type is supported only - over TLS connections. - username: - type: string - description: Username used for the authentication. - required: - - type - description: Authentication configuration for connecting to the cluster. - http: - type: object - properties: - port: - type: integer - minimum: 1023 - description: The port which is the server listening on. - cors: - type: object - properties: - allowedOrigins: + The name of the scheduler used to dispatch this + `Pod`. If not specified, the default scheduler will + be used. + hostAliases: type: array items: - type: string + type: object + properties: + hostnames: + type: array + items: + type: string + ip: + type: string description: >- - List of allowed origins. Java regular expressions - can be used. - allowedMethods: - type: array - items: - type: string - description: List of allowed HTTP methods. - required: - - allowedOrigins - - allowedMethods - description: CORS configuration for the HTTP Bridge. - description: The HTTP related configuration. - consumer: - type: object - properties: - config: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - The Kafka consumer configuration used for consumer - instances created by the bridge. Properties with the - following prefixes cannot be set: ssl., - bootstrap.servers, group.id, sasl., security. 
(with the - exception of: ssl.endpoint.identification.algorithm, - ssl.cipher.suites, ssl.protocol, ssl.enabled.protocols). - description: Kafka consumer related configuration. - producer: - type: object - properties: - config: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - The Kafka producer configuration used for producer - instances created by the bridge. Properties with the - following prefixes cannot be set: ssl., - bootstrap.servers, sasl., security. (with the exception - of: ssl.endpoint.identification.algorithm, - ssl.cipher.suites, ssl.protocol, ssl.enabled.protocols). - description: Kafka producer related configuration. - resources: - type: object - properties: - limits: - x-kubernetes-preserve-unknown-fields: true - type: object - requests: - x-kubernetes-preserve-unknown-fields: true - type: object - description: CPU and memory resources to reserve. - jvmOptions: - type: object - properties: - '-XX': - x-kubernetes-preserve-unknown-fields: true - type: object - description: A map of -XX options to the JVM. - '-Xms': - type: string - pattern: '[0-9]+[mMgG]?' - description: '-Xms option to to the JVM.' - '-Xmx': - type: string - pattern: '[0-9]+[mMgG]?' - description: '-Xmx option to to the JVM.' - gcLoggingEnabled: - type: boolean - description: >- - Specifies whether the Garbage Collection logging is - enabled. The default is false. - javaSystemProperties: - type: array - items: - type: object - properties: - name: - type: string - description: The system property name. - value: - type: string - description: The system property value. - description: >- - A map of additional system properties which will be - passed using the `-D` option to the JVM. - description: '**Currently not supported** JVM Options for pods.' - logging: - type: object - properties: - loggers: - x-kubernetes-preserve-unknown-fields: true - type: object - description: A Map from logger name to logger level. 
- type: - type: string - enum: - - inline - - external - description: 'Logging type, must be either ''inline'' or ''external''.' - valueFrom: - type: object - properties: - configMapKeyRef: - type: object - properties: - key: - type: string - name: - type: string - optional: - type: boolean + The pod's HostAliases. HostAliases is an optional + list of hosts and IPs that will be injected into the + Pod's hosts file if specified. + tmpDirSizeLimit: + type: string + pattern: '^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$' + description: >- + Defines the total amount (for example `1Gi`) of + local storage required for temporary EmptyDir volume + (`/tmp`). Default value is `5Mi`. + enableServiceLinks: + type: boolean description: >- - Reference to the key in the ConfigMap containing the - configuration. - description: >- - `ConfigMap` entry where the logging configuration is - stored. - required: - - type - description: Logging configuration for Kafka Bridge. - enableMetrics: - type: boolean - description: Enable the metrics for the Kafka Bridge. Default is false. - livenessProbe: - type: object - properties: - failureThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive failures for the probe to be - considered failed after having succeeded. Defaults to 3. - Minimum value is 1. - initialDelaySeconds: - type: integer - minimum: 0 - description: >- - The initial delay before first the health is first - checked. Default to 15 seconds. Minimum value is 0. - periodSeconds: - type: integer - minimum: 1 - description: >- - How often (in seconds) to perform the probe. Default to - 10 seconds. Minimum value is 1. - successThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive successes for the probe to be - considered successful after having failed. Defaults to - 1. Must be 1 for liveness. Minimum value is 1. - timeoutSeconds: - type: integer - minimum: 1 - description: >- - The timeout for each attempted health check. 
Default to - 5 seconds. Minimum value is 1. - description: Pod liveness checking. - readinessProbe: - type: object - properties: - failureThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive failures for the probe to be - considered failed after having succeeded. Defaults to 3. - Minimum value is 1. - initialDelaySeconds: - type: integer - minimum: 0 - description: >- - The initial delay before first the health is first - checked. Default to 15 seconds. Minimum value is 0. - periodSeconds: - type: integer - minimum: 1 - description: >- - How often (in seconds) to perform the probe. Default to - 10 seconds. Minimum value is 1. - successThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive successes for the probe to be - considered successful after having failed. Defaults to - 1. Must be 1 for liveness. Minimum value is 1. - timeoutSeconds: - type: integer - minimum: 1 - description: >- - The timeout for each attempted health check. Default to - 5 seconds. Minimum value is 1. - description: Pod readiness checking. - template: - type: object - properties: - deployment: + Indicates whether information about services should + be injected into Pod's environment variables. + topologySpreadConstraints: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + matchLabelKeys: + type: array + items: + type: string + maxSkew: + type: integer + minDomains: + type: integer + nodeAffinityPolicy: + type: string + nodeTaintsPolicy: + type: string + topologyKey: + type: string + whenUnsatisfiable: + type: string + description: The pod's topology spread constraints. + description: Template for Kafka MirrorMaker `Pods`. 
+ podDisruptionBudget: type: object properties: metadata: @@ -11251,18 +5720,97 @@ spec: be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. - description: Metadata applied to the resource. - deploymentStrategy: - type: string - enum: - - RollingUpdate - - Recreate description: >- - DeploymentStrategy which will be used for this - Deployment. Valid values are `RollingUpdate` and - `Recreate`. Defaults to `RollingUpdate`. - description: Template for Kafka Bridge `Deployment`. - pod: + Metadata to apply to the + `PodDisruptionBudgetTemplate` resource. + maxUnavailable: + type: integer + minimum: 0 + description: >- + Maximum number of unavailable pods to allow + automatic Pod eviction. A Pod eviction is allowed + when the `maxUnavailable` number of pods or fewer + are unavailable after the eviction. Setting this + value to 0 prevents all voluntary evictions, so the + pods must be evicted manually. Defaults to 1. + description: Template for Kafka MirrorMaker `PodDisruptionBudget`. + mirrorMakerContainer: + type: object + properties: + env: + type: array + items: + type: object + properties: + name: + type: string + description: The environment variable key. + value: + type: string + description: The environment variable value. + description: >- + Environment variables which should be applied to the + container. 
+ securityContext: + type: object + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + type: object + properties: + add: + type: array + items: + type: string + drop: + type: array + items: + type: string + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Security context for the container. + description: Template for Kafka MirrorMaker container. + serviceAccount: type: object properties: metadata: @@ -11285,2003 +5833,4787 @@ spec: `StatefulSets`, `Deployments`, `Pods`, and `Services`. description: Metadata applied to the resource. - imagePullSecrets: - type: array - items: - type: object - properties: - name: - type: string - description: >- - List of references to secrets in the same namespace - to use for pulling any of the images used by this - Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` - environment variable in Cluster Operator and the - `imagePullSecrets` option are specified, only the - `imagePullSecrets` variable is used and the - `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored. - securityContext: - type: object - properties: - fsGroup: - type: integer - fsGroupChangePolicy: + description: Template for the Kafka MirrorMaker service account. + description: >- + Template to specify how Kafka MirrorMaker resources, + `Deployments` and `Pods`, are generated. 
+ livenessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive failures for the probe to be + considered failed after having succeeded. Defaults to 3. + Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: >- + The initial delay before first the health is first + checked. Default to 15 seconds. Minimum value is 0. + periodSeconds: + type: integer + minimum: 1 + description: >- + How often (in seconds) to perform the probe. Default to + 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive successes for the probe to be + considered successful after having failed. Defaults to + 1. Must be 1 for liveness. Minimum value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: >- + The timeout for each attempted health check. Default to + 5 seconds. Minimum value is 1. + description: Pod liveness checking. + readinessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive failures for the probe to be + considered failed after having succeeded. Defaults to 3. + Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: >- + The initial delay before first the health is first + checked. Default to 15 seconds. Minimum value is 0. + periodSeconds: + type: integer + minimum: 1 + description: >- + How often (in seconds) to perform the probe. Default to + 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive successes for the probe to be + considered successful after having failed. Defaults to + 1. Must be 1 for liveness. Minimum value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: >- + The timeout for each attempted health check. Default to + 5 seconds. Minimum value is 1. + description: Pod readiness checking. 
+ oneOf: + - properties: + include: {} + required: + - include + - properties: + whitelist: {} + required: + - whitelist + required: + - replicas + - consumer + - producer + description: The specification of Kafka MirrorMaker. + status: + type: object + properties: + conditions: + type: array + items: + type: object + properties: + type: + type: string + description: >- + The unique identifier of a condition, used to + distinguish between other conditions in the resource. + status: + type: string + description: >- + The status of the condition, either True, False or + Unknown. + lastTransitionTime: + type: string + description: >- + Last time the condition of a type changed from one + status to another. The required format is + 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. + reason: + type: string + description: >- + The reason for the condition's last transition (a + single word in CamelCase). + message: + type: string + description: >- + Human-readable message indicating details about the + condition's last transition. + description: List of status conditions. + observedGeneration: + type: integer + description: >- + The generation of the CRD that was last reconciled by the + operator. + labelSelector: + type: string + description: Label selector for pods providing this resource. + replicas: + type: integer + description: >- + The current number of pods being used to provide this + resource. + description: The status of Kafka MirrorMaker. 
+ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: strimzi-entity-operator + labels: + app: strimzi +rules: + - apiGroups: + - kafka.strimzi.io + resources: + - kafkatopics + - kafkatopics/status + - kafkausers + - kafkausers/status + verbs: + - get + - list + - watch + - create + - patch + - update + - delete + - apiGroups: + - '' + resources: + - events + verbs: + - create + - apiGroups: + - '' + resources: + - secrets + verbs: + - get + - list + - watch + - create + - delete + - patch + - update + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: strimzi-cluster-operator-global + labels: + app: strimzi +rules: + - apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + verbs: + - get + - list + - watch + - create + - delete + - patch + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - apiGroups: + - '' + resources: + - nodes + verbs: + - list + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: strimzi-kafka-client + labels: + app: strimzi +rules: + - apiGroups: + - '' + resources: + - nodes + verbs: + - get + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: strimzi-cluster-operator-kafka-client-delegation + labels: + app: strimzi +subjects: + - kind: ServiceAccount + name: strimzi-cluster-operator + namespace: private +roleRef: + kind: ClusterRole + name: strimzi-kafka-client + apiGroup: rbac.authorization.k8s.io + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: strimzipodsets.core.strimzi.io + labels: + app: strimzi + strimzi.io/crd-install: 'true' +spec: + group: core.strimzi.io + names: + kind: StrimziPodSet + listKind: StrimziPodSetList + singular: strimzipodset + plural: strimzipodsets + shortNames: + - sps + categories: + - strimzi + scope: Namespaced + conversion: + strategy: None + versions: + - 
name: v1beta2 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - name: Pods + description: Number of pods managed by the StrimziPodSet + jsonPath: .status.pods + type: integer + - name: Ready Pods + description: Number of ready pods managed by the StrimziPodSet + jsonPath: .status.readyPods + type: integer + - name: Current Pods + description: Number of up-to-date pods managed by the StrimziPodSet + jsonPath: .status.currentPods + type: integer + - name: Age + description: Age of the StrimziPodSet + jsonPath: .metadata.creationTimestamp + type: date + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + selector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: type: string - runAsGroup: - type: integer - runAsNonRoot: - type: boolean - runAsUser: - type: integer - seLinuxOptions: - type: object - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - seccompProfile: - type: object - properties: - localhostProfile: - type: string - type: - type: string - supplementalGroups: - type: array - items: - type: integer - sysctls: - type: array - items: - type: object - properties: - name: - type: string - value: - type: string - windowsOptions: + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Selector is a label query which matches all the pods managed + by this `StrimziPodSet`. Only `matchLabels` is supported. If + `matchExpressions` is set, it will be ignored. + pods: + type: array + items: + x-kubernetes-preserve-unknown-fields: true + type: object + description: The Pods managed by this StrimziPodSet. + required: + - selector + - pods + description: The specification of the StrimziPodSet. 
+ status: + type: object + properties: + conditions: + type: array + items: + type: object + properties: + type: + type: string + description: >- + The unique identifier of a condition, used to + distinguish between other conditions in the resource. + status: + type: string + description: >- + The status of the condition, either True, False or + Unknown. + lastTransitionTime: + type: string + description: >- + Last time the condition of a type changed from one + status to another. The required format is + 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. + reason: + type: string + description: >- + The reason for the condition's last transition (a + single word in CamelCase). + message: + type: string + description: >- + Human-readable message indicating details about the + condition's last transition. + description: List of status conditions. + observedGeneration: + type: integer + description: >- + The generation of the CRD that was last reconciled by the + operator. + pods: + type: integer + description: Number of pods managed by the StrimziPodSet controller. + readyPods: + type: integer + description: >- + Number of pods managed by the StrimziPodSet controller that + are ready. + currentPods: + type: integer + description: >- + Number of pods managed by the StrimziPodSet controller that + have the current revision. + description: The status of the StrimziPodSet. 
+ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: strimzi-cluster-operator + labels: + app: strimzi + namespace: private +subjects: + - kind: ServiceAccount + name: strimzi-cluster-operator + namespace: private +roleRef: + kind: ClusterRole + name: strimzi-cluster-operator-namespaced + apiGroup: rbac.authorization.k8s.io + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: strimzi-cluster-operator-entity-operator-delegation + labels: + app: strimzi + namespace: private +subjects: + - kind: ServiceAccount + name: strimzi-cluster-operator + namespace: private +roleRef: + kind: ClusterRole + name: strimzi-entity-operator + apiGroup: rbac.authorization.k8s.io + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: strimzi-cluster-operator-watched + labels: + app: strimzi +rules: + - apiGroups: + - '' + resources: + - pods + verbs: + - watch + - list + - apiGroups: + - kafka.strimzi.io + resources: + - kafkas + - kafkas/status + - kafkaconnects + - kafkaconnects/status + - kafkaconnectors + - kafkaconnectors/status + - kafkamirrormakers + - kafkamirrormakers/status + - kafkabridges + - kafkabridges/status + - kafkamirrormaker2s + - kafkamirrormaker2s/status + - kafkarebalances + - kafkarebalances/status + verbs: + - get + - list + - watch + - create + - delete + - patch + - update + - apiGroups: + - core.strimzi.io + resources: + - strimzipodsets + - strimzipodsets/status + verbs: + - get + - list + - watch + - create + - delete + - patch + - update + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: strimzi-cluster-operator-leader-election + labels: + app: strimzi +rules: + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - apiGroups: + - coordination.k8s.io + resources: + - leases + resourceNames: + - strimzi-cluster-operator + verbs: + - get + - list + - watch + - delete + - patch + - update + +--- 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: strimzi-cluster-operator + labels: + app: strimzi +subjects: + - kind: ServiceAccount + name: strimzi-cluster-operator + namespace: private +roleRef: + kind: ClusterRole + name: strimzi-cluster-operator-global + apiGroup: rbac.authorization.k8s.io + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: kafkamirrormaker2s.kafka.strimzi.io + labels: + app: strimzi + strimzi.io/crd-install: 'true' +spec: + group: kafka.strimzi.io + names: + kind: KafkaMirrorMaker2 + listKind: KafkaMirrorMaker2List + singular: kafkamirrormaker2 + plural: kafkamirrormaker2s + shortNames: + - kmm2 + categories: + - strimzi + scope: Namespaced + conversion: + strategy: None + versions: + - name: v1beta2 + served: true + storage: true + subresources: + status: {} + scale: + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + labelSelectorPath: .status.labelSelector + additionalPrinterColumns: + - name: Desired replicas + description: The desired number of Kafka MirrorMaker 2.0 replicas + jsonPath: .spec.replicas + type: integer + - name: Ready + description: The state of the custom resource + jsonPath: '.status.conditions[?(@.type=="Ready")].status' + type: string + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + version: + type: string + description: >- + The Kafka Connect version. Defaults to + {DefaultKafkaVersion}. Consult the user documentation to + understand the process required to upgrade or downgrade the + version. + replicas: + type: integer + description: The number of pods in the Kafka Connect group. + image: + type: string + description: The docker image for the pods. + connectCluster: + type: string + description: >- + The cluster alias used for Kafka Connect. The alias must + match a cluster in the list at `spec.clusters`. 
+ clusters: + type: array + items: + type: object + properties: + alias: + type: string + pattern: '^[a-zA-Z0-9\._\-]{1,100}$' + description: Alias used to reference the Kafka cluster. + bootstrapServers: + type: string + description: >- + A comma-separated list of `host:port` pairs for + establishing the connection to the Kafka cluster. + tls: + type: object + properties: + trustedCertificates: + type: array + items: type: object properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: + certificate: type: string - runAsUserName: + description: >- + The name of the file certificate in the + Secret. + secretName: type: string - description: >- - Configures pod-level security attributes and common - container settings. - terminationGracePeriodSeconds: - type: integer - minimum: 0 - description: >- - The grace period is the duration in seconds after - the processes running in the pod are sent a - termination signal, and the time when the processes - are forcibly halted with a kill signal. Set this - value to longer than the expected cleanup time for - your process. Value must be a non-negative integer. - A zero value indicates delete immediately. You might - need to increase the grace period for very large - Kafka clusters, so that the Kafka brokers have - enough time to transfer their work to another broker - before they are terminated. Defaults to 30 seconds. 
- affinity: - type: object - properties: - nodeAffinity: - type: object - properties: - preferredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - preference: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchFields: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - weight: - type: integer - requiredDuringSchedulingIgnoredDuringExecution: - type: object - properties: - nodeSelectorTerms: - type: array - items: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchFields: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - podAffinity: - type: object - properties: - preferredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - podAffinityTerm: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - weight: - type: integer - requiredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - 
matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - podAntiAffinity: + description: >- + The name of the Secret containing the + certificate. + required: + - certificate + - secretName + description: Trusted certificates for TLS connection. + description: >- + TLS configuration for connecting MirrorMaker 2.0 + connectors to a cluster. + authentication: + type: object + properties: + accessToken: + type: object + properties: + key: + type: string + description: >- + The key under which the secret value is stored + in the Kubernetes Secret. + secretName: + type: string + description: >- + The name of the Kubernetes Secret containing + the secret value. + required: + - key + - secretName + description: >- + Link to Kubernetes Secret containing the access + token which was obtained from the authorization + server. + accessTokenIsJwt: + type: boolean + description: >- + Configure whether access token should be treated + as JWT. This should be set to `false` if the + authorization server returns opaque tokens. + Defaults to `true`. + audience: + type: string + description: >- + OAuth audience to use when authenticating against + the authorization server. Some authorization + servers require the audience to be explicitly set. + The possible values depend on how the + authorization server is configured. By default, + `audience` is not specified when performing the + token endpoint request. + certificateAndKey: + type: object + properties: + certificate: + type: string + description: >- + The name of the file certificate in the + Secret. + key: + type: string + description: The name of the private key in the Secret. + secretName: + type: string + description: >- + The name of the Secret containing the + certificate. + required: + - certificate + - key + - secretName + description: >- + Reference to the `Secret` which holds the + certificate and private key pair. 
+ clientId: + type: string + description: >- + OAuth Client ID which the Kafka client can use to + authenticate against the OAuth server and use the + token endpoint URI. + clientSecret: + type: object + properties: + key: + type: string + description: >- + The key under which the secret value is stored + in the Kubernetes Secret. + secretName: + type: string + description: >- + The name of the Kubernetes Secret containing + the secret value. + required: + - key + - secretName + description: >- + Link to Kubernetes Secret containing the OAuth + client secret which the Kafka client can use to + authenticate against the OAuth server and use the + token endpoint URI. + connectTimeoutSeconds: + type: integer + description: >- + The connect timeout in seconds when connecting to + authorization server. If not set, the effective + connect timeout is 60 seconds. + disableTlsHostnameVerification: + type: boolean + description: >- + Enable or disable TLS hostname verification. + Default value is `false`. + enableMetrics: + type: boolean + description: >- + Enable or disable OAuth metrics. Default value is + `false`. + httpRetries: + type: integer + description: >- + The maximum number of retries to attempt if an + initial HTTP request fails. If not set, the + default is to not attempt any retries. + httpRetryPauseMs: + type: integer + description: >- + The pause to take before retrying a failed HTTP + request. If not set, the default is to not pause + at all but to immediately repeat a request. + maxTokenExpirySeconds: + type: integer + description: >- + Set or limit time-to-live of the access tokens to + the specified number of seconds. This should be + set if the authorization server returns opaque + tokens. + passwordSecret: + type: object + properties: + password: + type: string + description: >- + The name of the key in the Secret under which + the password is stored. + secretName: + type: string + description: >- + The name of the Secret containing the + password. 
+ required: + - password + - secretName + description: >- + Reference to the `Secret` which holds the + password. + readTimeoutSeconds: + type: integer + description: >- + The read timeout in seconds when connecting to + authorization server. If not set, the effective + read timeout is 60 seconds. + refreshToken: + type: object + properties: + key: + type: string + description: >- + The key under which the secret value is stored + in the Kubernetes Secret. + secretName: + type: string + description: >- + The name of the Kubernetes Secret containing + the secret value. + required: + - key + - secretName + description: >- + Link to Kubernetes Secret containing the refresh + token which can be used to obtain access token + from the authorization server. + scope: + type: string + description: >- + OAuth scope to use when authenticating against the + authorization server. Some authorization servers + require this to be set. The possible values depend + on how authorization server is configured. By + default `scope` is not specified when doing the + token endpoint request. 
+ tlsTrustedCertificates: + type: array + items: type: object properties: - preferredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - podAffinityTerm: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - weight: - type: integer - requiredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - description: The pod's affinity rules. - tolerations: - type: array - items: + certificate: + type: string + description: >- + The name of the file certificate in the + Secret. + secretName: + type: string + description: >- + The name of the Secret containing the + certificate. + required: + - certificate + - secretName + description: >- + Trusted certificates for TLS connection to the + OAuth server. + tokenEndpointUri: + type: string + description: Authorization server token endpoint URI. + type: + type: string + enum: + - tls + - scram-sha-256 + - scram-sha-512 + - plain + - oauth + description: >- + Authentication type. Currently the supported types + are `tls`, `scram-sha-256`, `scram-sha-512`, + `plain`, and 'oauth'. `scram-sha-256` and + `scram-sha-512` types use SASL SCRAM-SHA-256 and + SASL SCRAM-SHA-512 Authentication, respectively. 
+ `plain` type uses SASL PLAIN Authentication. + `oauth` type uses SASL OAUTHBEARER Authentication. + The `tls` type uses TLS Client Authentication. The + `tls` type is supported only over TLS connections. + username: + type: string + description: Username used for the authentication. + required: + - type + description: >- + Authentication configuration for connecting to the + cluster. + config: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + The MirrorMaker 2.0 cluster config. Properties with + the following prefixes cannot be set: ssl., sasl., + security., listeners, plugin.path, rest., + bootstrap.servers, consumer.interceptor.classes, + producer.interceptor.classes (with the exception of: + ssl.endpoint.identification.algorithm, + ssl.cipher.suites, ssl.protocol, + ssl.enabled.protocols). + required: + - alias + - bootstrapServers + description: Kafka clusters for mirroring. + mirrors: + type: array + items: + type: object + properties: + sourceCluster: + type: string + description: >- + The alias of the source cluster used by the Kafka + MirrorMaker 2.0 connectors. The alias must match a + cluster in the list at `spec.clusters`. + targetCluster: + type: string + description: >- + The alias of the target cluster used by the Kafka + MirrorMaker 2.0 connectors. The alias must match a + cluster in the list at `spec.clusters`. + sourceConnector: + type: object + properties: + tasksMax: + type: integer + minimum: 1 + description: >- + The maximum number of tasks for the Kafka + Connector. + config: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + The Kafka Connector configuration. The following + properties cannot be set: connector.class, + tasks.max. + autoRestart: type: object properties: - effect: - type: string - key: - type: string - operator: - type: string - tolerationSeconds: - type: integer - value: - type: string - description: The pod's tolerations. 
- priorityClassName: - type: string - description: >- - The name of the priority class used to assign - priority to the pods. For more information about - priority classes, see {K8sPriorityClass}. - schedulerName: - type: string - description: >- - The name of the scheduler used to dispatch this - `Pod`. If not specified, the default scheduler will - be used. - hostAliases: - type: array - items: + enabled: + type: boolean + description: >- + Whether automatic restart for failed + connectors and tasks should be enabled or + disabled. + description: >- + Automatic restart of connector and tasks + configuration. + pause: + type: boolean + description: >- + Whether the connector should be paused. Defaults + to false. + description: >- + The specification of the Kafka MirrorMaker 2.0 source + connector. + heartbeatConnector: + type: object + properties: + tasksMax: + type: integer + minimum: 1 + description: >- + The maximum number of tasks for the Kafka + Connector. + config: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + The Kafka Connector configuration. The following + properties cannot be set: connector.class, + tasks.max. + autoRestart: type: object properties: - hostnames: - type: array - items: - type: string - ip: - type: string - description: >- - The pod's HostAliases. HostAliases is an optional - list of hosts and IPs that will be injected into the - Pod's hosts file if specified. - enableServiceLinks: - type: boolean - description: >- - Indicates whether information about services should - be injected into Pod's environment variables. - topologySpreadConstraints: - type: array - items: + enabled: + type: boolean + description: >- + Whether automatic restart for failed + connectors and tasks should be enabled or + disabled. + description: >- + Automatic restart of connector and tasks + configuration. + pause: + type: boolean + description: >- + Whether the connector should be paused. Defaults + to false. 
+ description: >- + The specification of the Kafka MirrorMaker 2.0 + heartbeat connector. + checkpointConnector: + type: object + properties: + tasksMax: + type: integer + minimum: 1 + description: >- + The maximum number of tasks for the Kafka + Connector. + config: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + The Kafka Connector configuration. The following + properties cannot be set: connector.class, + tasks.max. + autoRestart: type: object properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - maxSkew: - type: integer - topologyKey: - type: string - whenUnsatisfiable: - type: string - description: The pod's topology spread constraints. - description: Template for Kafka Bridge `Pods`. - apiService: + enabled: + type: boolean + description: >- + Whether automatic restart for failed + connectors and tasks should be enabled or + disabled. + description: >- + Automatic restart of connector and tasks + configuration. + pause: + type: boolean + description: >- + Whether the connector should be paused. Defaults + to false. + description: >- + The specification of the Kafka MirrorMaker 2.0 + checkpoint connector. + topicsPattern: + type: string + description: >- + A regular expression matching the topics to be + mirrored, for example, "topic1\|topic2\|topic3". + Comma-separated lists are also supported. + topicsBlacklistPattern: + type: string + description: >- + A regular expression matching the topics to exclude + from mirroring. Comma-separated lists are also + supported. + topicsExcludePattern: + type: string + description: >- + A regular expression matching the topics to exclude + from mirroring. Comma-separated lists are also + supported. 
+ groupsPattern: + type: string + description: >- + A regular expression matching the consumer groups to + be mirrored. Comma-separated lists are also supported. + groupsBlacklistPattern: + type: string + description: >- + A regular expression matching the consumer groups to + exclude from mirroring. Comma-separated lists are also + supported. + groupsExcludePattern: + type: string + description: >- + A regular expression matching the consumer groups to + exclude from mirroring. Comma-separated lists are also + supported. + required: + - sourceCluster + - targetCluster + description: Configuration of the MirrorMaker 2.0 connectors. + resources: + type: object + properties: + limits: + x-kubernetes-preserve-unknown-fields: true + type: object + requests: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + The maximum limits for CPU and memory resources and the + requested initial resources. + livenessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive failures for the probe to be + considered failed after having succeeded. Defaults to 3. + Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: >- + The initial delay before first the health is first + checked. Default to 15 seconds. Minimum value is 0. + periodSeconds: + type: integer + minimum: 1 + description: >- + How often (in seconds) to perform the probe. Default to + 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive successes for the probe to be + considered successful after having failed. Defaults to + 1. Must be 1 for liveness. Minimum value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: >- + The timeout for each attempted health check. Default to + 5 seconds. Minimum value is 1. + description: Pod liveness checking. 
+ readinessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive failures for the probe to be + considered failed after having succeeded. Defaults to 3. + Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: >- + The initial delay before first the health is first + checked. Default to 15 seconds. Minimum value is 0. + periodSeconds: + type: integer + minimum: 1 + description: >- + How often (in seconds) to perform the probe. Default to + 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive successes for the probe to be + considered successful after having failed. Defaults to + 1. Must be 1 for liveness. Minimum value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: >- + The timeout for each attempted health check. Default to + 5 seconds. Minimum value is 1. + description: Pod readiness checking. + jvmOptions: + type: object + properties: + '-XX': + x-kubernetes-preserve-unknown-fields: true type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can be - applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - description: Metadata applied to the resource. - ipFamilyPolicy: - type: string - enum: - - SingleStack - - PreferDualStack - - RequireDualStack - description: >- - Specifies the IP Family Policy used by the service. - Available options are `SingleStack`, - `PreferDualStack` and `RequireDualStack`. - `SingleStack` is for a single IP family. 
- `PreferDualStack` is for two IP families on - dual-stack configured clusters or a single IP family - on single-stack clusters. `RequireDualStack` fails - unless there are two IP families on dual-stack - configured clusters. If unspecified, Kubernetes will - choose the default value based on the service type. - Available on Kubernetes 1.20 and newer. - ipFamilies: - type: array - items: + description: A map of -XX options to the JVM. + '-Xms': + type: string + pattern: '^[0-9]+[mMgG]?$' + description: '-Xms option to to the JVM.' + '-Xmx': + type: string + pattern: '^[0-9]+[mMgG]?$' + description: '-Xmx option to to the JVM.' + gcLoggingEnabled: + type: boolean + description: >- + Specifies whether the Garbage Collection logging is + enabled. The default is false. + javaSystemProperties: + type: array + items: + type: object + properties: + name: type: string - enum: - - IPv4 - - IPv6 - description: >- - Specifies the IP Families used by the service. - Available options are `IPv4` and `IPv6. If - unspecified, Kubernetes will choose the default - value based on the `ipFamilyPolicy` setting. - Available on Kubernetes 1.20 and newer. - description: Template for Kafka Bridge API `Service`. - bridgeContainer: - type: object - properties: - env: - type: array - items: - type: object - properties: - name: - type: string - description: The environment variable key. - value: - type: string - description: The environment variable value. - description: >- - Environment variables which should be applied to the - container. 
- securityContext: - type: object - properties: - allowPrivilegeEscalation: - type: boolean - capabilities: - type: object - properties: - add: - type: array - items: - type: string - drop: - type: array - items: - type: string - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - type: integer - runAsNonRoot: - type: boolean - runAsUser: - type: integer - seLinuxOptions: - type: object - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - seccompProfile: - type: object - properties: - localhostProfile: - type: string - type: - type: string - windowsOptions: - type: object - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - description: Security context for the container. - description: Template for the Kafka Bridge container. - podDisruptionBudget: + description: The system property name. + value: + type: string + description: The system property value. + description: >- + A map of additional system properties which will be + passed using the `-D` option to the JVM. + description: JVM Options for pods. + jmxOptions: + type: object + properties: + authentication: type: object properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can be - applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - description: >- - Metadata to apply to the - `PodDistruptionBugetTemplate` resource. 
- maxUnavailable: - type: integer - minimum: 0 + type: + type: string + enum: + - password description: >- - Maximum number of unavailable pods to allow - automatic Pod eviction. A Pod eviction is allowed - when the `maxUnavailable` number of pods or fewer - are unavailable after the eviction. Setting this - value to 0 prevents all voluntary evictions, so the - pods must be evicted manually. Defaults to 1. - description: Template for Kafka Bridge `PodDisruptionBudget`. - description: >- - Template for Kafka Bridge resources. The template allows - users to specify how is the `Deployment` and `Pods` - generated. - tracing: + Authentication type. Currently the only supported + types are `password`.`password` type creates a + username and protected port with no TLS. + required: + - type + description: >- + Authentication configuration for connecting to the JMX + port. + description: JMX Options. + logging: type: object properties: + loggers: + x-kubernetes-preserve-unknown-fields: true + type: object + description: A Map from logger name to logger level. type: type: string enum: - - jaeger + - inline + - external + description: 'Logging type, must be either ''inline'' or ''external''.' + valueFrom: + type: object + properties: + configMapKeyRef: + type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean + description: >- + Reference to the key in the ConfigMap containing the + configuration. description: >- - Type of the tracing used. Currently the only supported - type is `jaeger` for Jaeger tracing. + `ConfigMap` entry where the logging configuration is + stored. required: - type - description: The configuration of tracing in Kafka Bridge. - required: - - bootstrapServers - description: The specification of the Kafka Bridge. 
- status: - type: object - properties: - conditions: - type: array - items: - type: object - properties: - type: - type: string - description: >- - The unique identifier of a condition, used to - distinguish between other conditions in the resource. - status: - type: string - description: >- - The status of the condition, either True, False or - Unknown. - lastTransitionTime: - type: string - description: >- - Last time the condition of a type changed from one - status to another. The required format is - 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. - reason: - type: string - description: >- - The reason for the condition's last transition (a - single word in CamelCase). - message: - type: string - description: >- - Human-readable message indicating details about the - condition's last transition. - description: List of status conditions. - observedGeneration: - type: integer - description: >- - The generation of the CRD that was last reconciled by the - operator. - url: - type: string - description: >- - The URL at which external client applications can access the - Kafka Bridge. - labelSelector: - type: string - description: Label selector for pods providing this resource. - replicas: - type: integer - description: >- - The current number of pods being used to provide this - resource. - description: The status of the Kafka Bridge. 
- ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: kafkaconnectors.kafka.strimzi.io - labels: - app: strimzi - strimzi.io/crd-install: 'true' -spec: - group: kafka.strimzi.io - names: - kind: KafkaConnector - listKind: KafkaConnectorList - singular: kafkaconnector - plural: kafkaconnectors - shortNames: - - kctr - categories: - - strimzi - scope: Namespaced - conversion: - strategy: None - versions: - - name: v1beta2 - served: true - storage: true - subresources: - status: {} - scale: - specReplicasPath: .spec.tasksMax - statusReplicasPath: .status.tasksMax - additionalPrinterColumns: - - name: Cluster - description: The name of the Kafka Connect cluster this connector belongs to - jsonPath: .metadata.labels.strimzi\.io/cluster - type: string - - name: Connector class - description: The class used by this connector - jsonPath: .spec.class - type: string - - name: Max Tasks - description: Maximum number of tasks - jsonPath: .spec.tasksMax - type: integer - - name: Ready - description: The state of the custom resource - jsonPath: '.status.conditions[?(@.type=="Ready")].status' - type: string - schema: - openAPIV3Schema: - type: object - properties: - spec: - type: object - properties: - class: + description: Logging configuration for Kafka Connect. + clientRackInitImage: type: string - description: The Class for the Kafka Connector. - tasksMax: - type: integer - minimum: 1 - description: The maximum number of tasks for the Kafka Connector. - config: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - The Kafka Connector configuration. The following properties - cannot be set: connector.class, tasks.max. - pause: - type: boolean - description: Whether the connector should be paused. Defaults to false. - description: The specification of the Kafka Connector. 
- status: - type: object - properties: - conditions: - type: array - items: - type: object - properties: - type: - type: string - description: >- - The unique identifier of a condition, used to - distinguish between other conditions in the resource. - status: - type: string - description: >- - The status of the condition, either True, False or - Unknown. - lastTransitionTime: - type: string - description: >- - Last time the condition of a type changed from one - status to another. The required format is - 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. - reason: - type: string - description: >- - The reason for the condition's last transition (a - single word in CamelCase). - message: - type: string - description: >- - Human-readable message indicating details about the - condition's last transition. - description: List of status conditions. - observedGeneration: - type: integer description: >- - The generation of the CRD that was last reconciled by the - operator. - connectorStatus: - x-kubernetes-preserve-unknown-fields: true + The image of the init container used for initializing the + `client.rack`. + rack: type: object + properties: + topologyKey: + type: string + example: topology.kubernetes.io/zone + description: >- + A key that matches labels assigned to the Kubernetes + cluster nodes. The value of the label is used to set a + broker's `broker.rack` config, and the `client.rack` + config for Kafka Connect or MirrorMaker 2.0. + required: + - topologyKey description: >- - The connector status, as reported by the Kafka Connect REST - API. - tasksMax: - type: integer - description: The maximum number of tasks for the Kafka Connector. - topics: - type: array - items: - type: string - description: The list of topics used by the Kafka Connector. - description: The status of the Kafka Connector. 
- ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: kafkaconnects2is.kafka.strimzi.io - labels: - app: strimzi - strimzi.io/crd-install: 'true' -spec: - group: kafka.strimzi.io - names: - kind: KafkaConnectS2I - listKind: KafkaConnectS2IList - singular: kafkaconnects2i - plural: kafkaconnects2is - shortNames: - - kcs2i - categories: - - strimzi - scope: Namespaced - conversion: - strategy: None - versions: - - name: v1beta2 - served: true - storage: true - subresources: - status: {} - scale: - specReplicasPath: .spec.replicas - statusReplicasPath: .status.replicas - labelSelectorPath: .status.labelSelector - additionalPrinterColumns: - - name: Desired replicas - description: The desired number of Kafka Connect replicas - jsonPath: .spec.replicas - type: integer - - name: Ready - description: The state of the custom resource - jsonPath: '.status.conditions[?(@.type=="Ready")].status' - type: string - schema: - openAPIV3Schema: - type: object - properties: - spec: - type: object - properties: - version: - type: string - description: >- - The Kafka Connect version. Defaults to - {DefaultKafkaVersion}. Consult the user documentation to - understand the process required to upgrade or downgrade the - version. - replicas: - type: integer - description: The number of pods in the Kafka Connect group. - image: - type: string - description: The docker image for the pods. - buildResources: + Configuration of the node label which will be used as the + `client.rack` consumer configuration. + tracing: type: object properties: - limits: - x-kubernetes-preserve-unknown-fields: true + type: + type: string + enum: + - jaeger + - opentelemetry + description: >- + Type of the tracing used. Currently the only supported + types are `jaeger` for OpenTracing (Jaeger) tracing and + `opentelemetry` for OpenTelemetry tracing. The + OpenTracing (Jaeger) tracing is deprecated. 
+ required: + - type + description: The configuration of tracing in Kafka Connect. + template: + type: object + properties: + deployment: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can be + applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + description: Metadata applied to the resource. + deploymentStrategy: + type: string + enum: + - RollingUpdate + - Recreate + description: >- + Pod replacement strategy for deployment + configuration changes. Valid values are + `RollingUpdate` and `Recreate`. Defaults to + `RollingUpdate`. + description: Template for Kafka Connect `Deployment`. + podSet: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can be + applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + description: Metadata applied to the resource. + description: Template for Kafka Connect `StrimziPodSet` resource. + pod: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. 
Can be + applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + description: Metadata applied to the resource. + imagePullSecrets: + type: array + items: + type: object + properties: + name: + type: string + description: >- + List of references to secrets in the same namespace + to use for pulling any of the images used by this + Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` + environment variable in Cluster Operator and the + `imagePullSecrets` option are specified, only the + `imagePullSecrets` variable is used and the + `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored. + securityContext: + type: object + properties: + fsGroup: + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + supplementalGroups: + type: array + items: + type: integer + sysctls: + type: array + items: + type: object + properties: + name: + type: string + value: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: >- + Configures pod-level security attributes and common + container settings. 
+ terminationGracePeriodSeconds: + type: integer + minimum: 0 + description: >- + The grace period is the duration in seconds after + the processes running in the pod are sent a + termination signal, and the time when the processes + are forcibly halted with a kill signal. Set this + value to longer than the expected cleanup time for + your process. Value must be a non-negative integer. + A zero value indicates delete immediately. You might + need to increase the grace period for very large + Kafka clusters, so that the Kafka brokers have + enough time to transfer their work to another broker + before they are terminated. Defaults to 30 seconds. + affinity: + type: object + properties: + nodeAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + preference: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: object + properties: + nodeSelectorTerms: + type: array + items: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + podAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: 
array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + podAntiAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + 
key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + description: The pod's affinity rules. + tolerations: + type: array + items: + type: object + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + type: integer + value: + type: string + description: The pod's tolerations. + priorityClassName: + type: string + description: >- + The name of the priority class used to assign + priority to the pods. For more information about + priority classes, see {K8sPriorityClass}. + schedulerName: + type: string + description: >- + The name of the scheduler used to dispatch this + `Pod`. If not specified, the default scheduler will + be used. + hostAliases: + type: array + items: + type: object + properties: + hostnames: + type: array + items: + type: string + ip: + type: string + description: >- + The pod's HostAliases. HostAliases is an optional + list of hosts and IPs that will be injected into the + Pod's hosts file if specified. 
+ tmpDirSizeLimit: + type: string + pattern: '^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$' + description: >- + Defines the total amount (for example `1Gi`) of + local storage required for temporary EmptyDir volume + (`/tmp`). Default value is `5Mi`. + enableServiceLinks: + type: boolean + description: >- + Indicates whether information about services should + be injected into Pod's environment variables. + topologySpreadConstraints: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + matchLabelKeys: + type: array + items: + type: string + maxSkew: + type: integer + minDomains: + type: integer + nodeAffinityPolicy: + type: string + nodeTaintsPolicy: + type: string + topologyKey: + type: string + whenUnsatisfiable: + type: string + description: The pod's topology spread constraints. + description: Template for Kafka Connect `Pods`. + apiService: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can be + applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + description: Metadata applied to the resource. + ipFamilyPolicy: + type: string + enum: + - SingleStack + - PreferDualStack + - RequireDualStack + description: >- + Specifies the IP Family Policy used by the service. + Available options are `SingleStack`, + `PreferDualStack` and `RequireDualStack`. 
+ `SingleStack` is for a single IP family. + `PreferDualStack` is for two IP families on + dual-stack configured clusters or a single IP family + on single-stack clusters. `RequireDualStack` fails + unless there are two IP families on dual-stack + configured clusters. If unspecified, Kubernetes will + choose the default value based on the service type. + Available on Kubernetes 1.20 and newer. + ipFamilies: + type: array + items: + type: string + enum: + - IPv4 + - IPv6 + description: >- + Specifies the IP Families used by the service. + Available options are `IPv4` and `IPv6. If + unspecified, Kubernetes will choose the default + value based on the `ipFamilyPolicy` setting. + Available on Kubernetes 1.20 and newer. + description: Template for Kafka Connect API `Service`. + headlessService: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can be + applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + description: Metadata applied to the resource. + ipFamilyPolicy: + type: string + enum: + - SingleStack + - PreferDualStack + - RequireDualStack + description: >- + Specifies the IP Family Policy used by the service. + Available options are `SingleStack`, + `PreferDualStack` and `RequireDualStack`. + `SingleStack` is for a single IP family. + `PreferDualStack` is for two IP families on + dual-stack configured clusters or a single IP family + on single-stack clusters. `RequireDualStack` fails + unless there are two IP families on dual-stack + configured clusters. 
If unspecified, Kubernetes will + choose the default value based on the service type. + Available on Kubernetes 1.20 and newer. + ipFamilies: + type: array + items: + type: string + enum: + - IPv4 + - IPv6 + description: >- + Specifies the IP Families used by the service. + Available options are `IPv4` and `IPv6. If + unspecified, Kubernetes will choose the default + value based on the `ipFamilyPolicy` setting. + Available on Kubernetes 1.20 and newer. + description: Template for Kafka Connect headless `Service`. + connectContainer: + type: object + properties: + env: + type: array + items: + type: object + properties: + name: + type: string + description: The environment variable key. + value: + type: string + description: The environment variable value. + description: >- + Environment variables which should be applied to the + container. + securityContext: + type: object + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + type: object + properties: + add: + type: array + items: + type: string + drop: + type: array + items: + type: string + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Security context for the container. + description: Template for the Kafka Connect container. + initContainer: + type: object + properties: + env: + type: array + items: + type: object + properties: + name: + type: string + description: The environment variable key. 
+ value: + type: string + description: The environment variable value. + description: >- + Environment variables which should be applied to the + container. + securityContext: + type: object + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + type: object + properties: + add: + type: array + items: + type: string + drop: + type: array + items: + type: string + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Security context for the container. + description: Template for the Kafka init container. + podDisruptionBudget: type: object - requests: - x-kubernetes-preserve-unknown-fields: true + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can be + applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + description: >- + Metadata to apply to the + `PodDisruptionBudgetTemplate` resource. + maxUnavailable: + type: integer + minimum: 0 + description: >- + Maximum number of unavailable pods to allow + automatic Pod eviction. 
A Pod eviction is allowed + when the `maxUnavailable` number of pods or fewer + are unavailable after the eviction. Setting this + value to 0 prevents all voluntary evictions, so the + pods must be evicted manually. Defaults to 1. + description: Template for Kafka Connect `PodDisruptionBudget`. + serviceAccount: type: object - description: CPU and memory resources to reserve. - bootstrapServers: - type: string - description: >- - Bootstrap servers to connect to. This should be given as a - comma separated list of __:‍__ pairs. - tls: - type: object - properties: - trustedCertificates: - type: array - items: - type: object - properties: - certificate: - type: string - description: The name of the file certificate in the Secret. - secretName: - type: string - description: The name of the Secret containing the certificate. - required: - - certificate - - secretName - description: Trusted certificates for TLS connection. - description: TLS configuration. - authentication: - type: object - properties: - accessToken: + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can be + applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + description: Metadata applied to the resource. + description: Template for the Kafka Connect service account. + clusterRoleBinding: type: object properties: - key: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. 
Can be + applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + description: Metadata applied to the resource. + description: Template for the Kafka Connect ClusterRoleBinding. + buildPod: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can be + applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + description: Metadata applied to the resource. + imagePullSecrets: + type: array + items: + type: object + properties: + name: + type: string + description: >- + List of references to secrets in the same namespace + to use for pulling any of the images used by this + Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` + environment variable in Cluster Operator and the + `imagePullSecrets` option are specified, only the + `imagePullSecrets` variable is used and the + `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored. 
+ securityContext: + type: object + properties: + fsGroup: + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + supplementalGroups: + type: array + items: + type: integer + sysctls: + type: array + items: + type: object + properties: + name: + type: string + value: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: >- + Configures pod-level security attributes and common + container settings. + terminationGracePeriodSeconds: + type: integer + minimum: 0 + description: >- + The grace period is the duration in seconds after + the processes running in the pod are sent a + termination signal, and the time when the processes + are forcibly halted with a kill signal. Set this + value to longer than the expected cleanup time for + your process. Value must be a non-negative integer. + A zero value indicates delete immediately. You might + need to increase the grace period for very large + Kafka clusters, so that the Kafka brokers have + enough time to transfer their work to another broker + before they are terminated. Defaults to 30 seconds. 
+ affinity: + type: object + properties: + nodeAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + preference: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: object + properties: + nodeSelectorTerms: + type: array + items: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + podAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + 
requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + podAntiAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + 
x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + description: The pod's affinity rules. + tolerations: + type: array + items: + type: object + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + type: integer + value: + type: string + description: The pod's tolerations. + priorityClassName: type: string description: >- - The key under which the secret value is stored in - the Kubernetes Secret. - secretName: + The name of the priority class used to assign + priority to the pods. For more information about + priority classes, see {K8sPriorityClass}. + schedulerName: type: string description: >- - The name of the Kubernetes Secret containing the - secret value. - required: - - key - - secretName - description: >- - Link to Kubernetes Secret containing the access token - which was obtained from the authorization server. - accessTokenIsJwt: - type: boolean + The name of the scheduler used to dispatch this + `Pod`. If not specified, the default scheduler will + be used. + hostAliases: + type: array + items: + type: object + properties: + hostnames: + type: array + items: + type: string + ip: + type: string + description: >- + The pod's HostAliases. HostAliases is an optional + list of hosts and IPs that will be injected into the + Pod's hosts file if specified. + tmpDirSizeLimit: + type: string + pattern: '^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$' + description: >- + Defines the total amount (for example `1Gi`) of + local storage required for temporary EmptyDir volume + (`/tmp`). Default value is `5Mi`. 
+ enableServiceLinks: + type: boolean + description: >- + Indicates whether information about services should + be injected into Pod's environment variables. + topologySpreadConstraints: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + matchLabelKeys: + type: array + items: + type: string + maxSkew: + type: integer + minDomains: + type: integer + nodeAffinityPolicy: + type: string + nodeTaintsPolicy: + type: string + topologyKey: + type: string + whenUnsatisfiable: + type: string + description: The pod's topology spread constraints. description: >- - Configure whether access token should be treated as JWT. - This should be set to `false` if the authorization - server returns opaque tokens. Defaults to `true`. - certificateAndKey: + Template for Kafka Connect Build `Pods`. The build pod + is used only on Kubernetes. + buildContainer: type: object properties: - certificate: - type: string - description: The name of the file certificate in the Secret. - key: - type: string - description: The name of the private key in the Secret. - secretName: - type: string - description: The name of the Secret containing the certificate. - required: - - certificate - - key - - secretName - description: >- - Reference to the `Secret` which holds the certificate - and private key pair. - clientId: - type: string + env: + type: array + items: + type: object + properties: + name: + type: string + description: The environment variable key. + value: + type: string + description: The environment variable value. + description: >- + Environment variables which should be applied to the + container. 
+ securityContext: + type: object + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + type: object + properties: + add: + type: array + items: + type: string + drop: + type: array + items: + type: string + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Security context for the container. description: >- - OAuth Client ID which the Kafka client can use to - authenticate against the OAuth server and use the token - endpoint URI. - clientSecret: + Template for the Kafka Connect Build container. The + build container is used only on Kubernetes. + buildConfig: type: object properties: - key: - type: string + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can be + applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. description: >- - The key under which the secret value is stored in - the Kubernetes Secret. - secretName: + Metadata to apply to the + `PodDisruptionBudgetTemplate` resource. 
+ pullSecret: type: string description: >- - The name of the Kubernetes Secret containing the - secret value. - required: - - key - - secretName - description: >- - Link to Kubernetes Secret containing the OAuth client - secret which the Kafka client can use to authenticate - against the OAuth server and use the token endpoint URI. - disableTlsHostnameVerification: - type: boolean - description: >- - Enable or disable TLS hostname verification. Default - value is `false`. - maxTokenExpirySeconds: - type: integer + Container Registry Secret with the credentials for + pulling the base image. description: >- - Set or limit time-to-live of the access tokens to the - specified number of seconds. This should be set if the - authorization server returns opaque tokens. - passwordSecret: + Template for the Kafka Connect BuildConfig used to build + new container images. The BuildConfig is used only on + OpenShift. + buildServiceAccount: type: object properties: - password: - type: string - description: >- - The name of the key in the Secret under which the - password is stored. - secretName: - type: string - description: The name of the Secret containing the password. - required: - - password - - secretName - description: Reference to the `Secret` which holds the password. - refreshToken: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can be + applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + description: Metadata applied to the resource. + description: Template for the Kafka Connect Build service account. 
+ jmxSecret: type: object properties: - key: - type: string - description: >- - The key under which the secret value is stored in - the Kubernetes Secret. - secretName: - type: string - description: >- - The name of the Kubernetes Secret containing the - secret value. - required: - - key - - secretName - description: >- - Link to Kubernetes Secret containing the refresh token - which can be used to obtain access token from the - authorization server. - scope: - type: string + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can be + applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + description: Metadata applied to the resource. description: >- - OAuth scope to use when authenticating against the - authorization server. Some authorization servers require - this to be set. The possible values depend on how - authorization server is configured. By default `scope` - is not specified when doing the token endpoint request. - tlsTrustedCertificates: + Template for Secret of the Kafka Connect Cluster JMX + authentication. + description: >- + Template for Kafka Connect and Kafka Mirror Maker 2 + resources. The template allows users to specify how the + `Deployment`, `Pods` and `Service` are generated. + externalConfiguration: + type: object + properties: + env: type: array items: type: object properties: - certificate: - type: string - description: The name of the file certificate in the Secret. - secretName: + name: type: string - description: The name of the Secret containing the certificate. 
+ description: >- + Name of the environment variable which will be + passed to the Kafka Connect pods. The name of the + environment variable cannot start with `KAFKA_` or + `STRIMZI_`. + valueFrom: + type: object + properties: + configMapKeyRef: + type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean + description: Reference to a key in a ConfigMap. + secretKeyRef: + type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean + description: Reference to a key in a Secret. + description: >- + Value of the environment variable which will be + passed to the Kafka Connect pods. It can be passed + either as a reference to Secret or ConfigMap + field. The field has to specify exactly one Secret + or ConfigMap. required: - - certificate - - secretName - description: >- - Trusted certificates for TLS connection to the OAuth - server. - tokenEndpointUri: - type: string - description: Authorization server token endpoint URI. - type: - type: string - enum: - - tls - - scram-sha-512 - - plain - - oauth - description: >- - Authentication type. Currently the only supported types - are `tls`, `scram-sha-512`, and `plain`. `scram-sha-512` - type uses SASL SCRAM-SHA-512 Authentication. `plain` - type uses SASL PLAIN Authentication. `oauth` type uses - SASL OAUTHBEARER Authentication. The `tls` type uses TLS - Client Authentication. The `tls` type is supported only - over TLS connections. - username: - type: string - description: Username used for the authentication. - required: - - type - description: Authentication configuration for Kafka Connect. - config: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - The Kafka Connect configuration. 
Properties with the - following prefixes cannot be set: ssl., sasl., security., - listeners, plugin.path, rest., bootstrap.servers, - consumer.interceptor.classes, producer.interceptor.classes - (with the exception of: - ssl.endpoint.identification.algorithm, ssl.cipher.suites, - ssl.protocol, ssl.enabled.protocols). - resources: - type: object - properties: - limits: - x-kubernetes-preserve-unknown-fields: true - type: object - requests: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - The maximum limits for CPU and memory resources and the - requested initial resources. - livenessProbe: - type: object - properties: - failureThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive failures for the probe to be - considered failed after having succeeded. Defaults to 3. - Minimum value is 1. - initialDelaySeconds: - type: integer - minimum: 0 - description: >- - The initial delay before first the health is first - checked. Default to 15 seconds. Minimum value is 0. - periodSeconds: - type: integer - minimum: 1 - description: >- - How often (in seconds) to perform the probe. Default to - 10 seconds. Minimum value is 1. - successThreshold: - type: integer - minimum: 1 + - name + - valueFrom description: >- - Minimum consecutive successes for the probe to be - considered successful after having failed. Defaults to - 1. Must be 1 for liveness. Minimum value is 1. - timeoutSeconds: - type: integer - minimum: 1 + Makes data from a Secret or ConfigMap available in the + Kafka Connect pods as environment variables. + volumes: + type: array + items: + type: object + properties: + configMap: + type: object + properties: + defaultMode: + type: integer + items: + type: array + items: + type: object + properties: + key: + type: string + mode: + type: integer + path: + type: string + name: + type: string + optional: + type: boolean + description: >- + Reference to a key in a ConfigMap. 
Exactly one + Secret or ConfigMap has to be specified. + name: + type: string + description: >- + Name of the volume which will be added to the + Kafka Connect pods. + secret: + type: object + properties: + defaultMode: + type: integer + items: + type: array + items: + type: object + properties: + key: + type: string + mode: + type: integer + path: + type: string + optional: + type: boolean + secretName: + type: string + description: >- + Reference to a key in a Secret. Exactly one Secret + or ConfigMap has to be specified. + required: + - name description: >- - The timeout for each attempted health check. Default to - 5 seconds. Minimum value is 1. - description: Pod liveness checking. - readinessProbe: + Makes data from a Secret or ConfigMap available in the + Kafka Connect pods as volumes. + description: >- + Pass data from Secrets or ConfigMaps to the Kafka Connect + pods and use them to configure connectors. + metricsConfig: type: object properties: - failureThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive failures for the probe to be - considered failed after having succeeded. Defaults to 3. - Minimum value is 1. - initialDelaySeconds: - type: integer - minimum: 0 - description: >- - The initial delay before first the health is first - checked. Default to 15 seconds. Minimum value is 0. - periodSeconds: - type: integer - minimum: 1 - description: >- - How often (in seconds) to perform the probe. Default to - 10 seconds. Minimum value is 1. - successThreshold: - type: integer - minimum: 1 + type: + type: string + enum: + - jmxPrometheusExporter description: >- - Minimum consecutive successes for the probe to be - considered successful after having failed. Defaults to - 1. Must be 1 for liveness. Minimum value is 1. - timeoutSeconds: - type: integer - minimum: 1 + Metrics type. Only 'jmxPrometheusExporter' supported + currently. 
+ valueFrom: + type: object + properties: + configMapKeyRef: + type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean + description: >- + Reference to the key in the ConfigMap containing the + configuration. description: >- - The timeout for each attempted health check. Default to - 5 seconds. Minimum value is 1. - description: Pod readiness checking. - jvmOptions: + ConfigMap entry where the Prometheus JMX Exporter + configuration is stored. For details of the structure of + this configuration, see the {JMXExporter}. + required: + - type + - valueFrom + description: Metrics configuration. + required: + - connectCluster + description: The specification of the Kafka MirrorMaker 2.0 cluster. + status: + type: object + properties: + conditions: + type: array + items: + type: object + properties: + type: + type: string + description: >- + The unique identifier of a condition, used to + distinguish between other conditions in the resource. + status: + type: string + description: >- + The status of the condition, either True, False or + Unknown. + lastTransitionTime: + type: string + description: >- + Last time the condition of a type changed from one + status to another. The required format is + 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. + reason: + type: string + description: >- + The reason for the condition's last transition (a + single word in CamelCase). + message: + type: string + description: >- + Human-readable message indicating details about the + condition's last transition. + description: List of status conditions. + observedGeneration: + type: integer + description: >- + The generation of the CRD that was last reconciled by the + operator. + url: + type: string + description: >- + The URL of the REST API endpoint for managing and monitoring + Kafka Connect connectors. 
+ autoRestartStatuses: + type: array + items: + type: object + properties: + count: + type: integer + description: >- + The number of times the connector or task is + restarted. + connectorName: + type: string + description: The name of the connector being restarted. + lastRestartTimestamp: + type: string + description: >- + The last time the automatic restart was attempted. The + required format is 'yyyy-MM-ddTHH:mm:ssZ' in the UTC + time zone. + description: List of MirrorMaker 2.0 connector auto restart statuses. + connectorPlugins: + type: array + items: + type: object + properties: + type: + type: string + description: >- + The type of the connector plugin. The available types + are `sink` and `source`. + version: + type: string + description: The version of the connector plugin. + class: + type: string + description: The class of the connector plugin. + description: >- + The list of connector plugins available in this Kafka + Connect deployment. + connectors: + type: array + items: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + List of MirrorMaker 2.0 connector statuses, as reported by + the Kafka Connect REST API. + labelSelector: + type: string + description: Label selector for pods providing this resource. + replicas: + type: integer + description: >- + The current number of pods being used to provide this + resource. + description: The status of the Kafka MirrorMaker 2.0 cluster. 
+ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: kafkas.kafka.strimzi.io + labels: + app: strimzi + strimzi.io/crd-install: 'true' +spec: + group: kafka.strimzi.io + names: + kind: Kafka + listKind: KafkaList + singular: kafka + plural: kafkas + shortNames: + - k + categories: + - strimzi + scope: Namespaced + conversion: + strategy: None + versions: + - name: v1beta2 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - name: Desired Kafka replicas + description: The desired number of Kafka replicas in the cluster + jsonPath: .spec.kafka.replicas + type: integer + - name: Desired ZK replicas + description: The desired number of ZooKeeper replicas in the cluster + jsonPath: .spec.zookeeper.replicas + type: integer + - name: Ready + description: The state of the custom resource + jsonPath: '.status.conditions[?(@.type=="Ready")].status' + type: string + - name: Warnings + description: Warnings related to the custom resource + jsonPath: '.status.conditions[?(@.type=="Warning")].status' + type: string + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + kafka: type: object properties: - '-XX': - x-kubernetes-preserve-unknown-fields: true - type: object - description: A map of -XX options to the JVM. - '-Xms': + version: type: string - pattern: '[0-9]+[mMgG]?' - description: '-Xms option to to the JVM.' - '-Xmx': + description: >- + The kafka broker version. Defaults to + {DefaultKafkaVersion}. Consult the user documentation to + understand the process required to upgrade or downgrade + the version. + replicas: + type: integer + minimum: 1 + description: The number of pods in the cluster. + image: type: string - pattern: '[0-9]+[mMgG]?' - description: '-Xmx option to to the JVM.' - gcLoggingEnabled: - type: boolean description: >- - Specifies whether the Garbage Collection logging is - enabled. The default is false. 
- javaSystemProperties: + The docker image for the pods. The default value depends + on the configured `Kafka.spec.kafka.version`. + listeners: type: array + minItems: 1 items: type: object properties: name: type: string - description: The system property name. - value: + pattern: '^[a-z0-9]{1,11}$' + description: >- + Name of the listener. The name will be used to + identify the listener and the related Kubernetes + objects. The name has to be unique within given a + Kafka cluster. The name can consist of lowercase + characters and numbers and be up to 11 characters + long. + port: + type: integer + minimum: 9092 + description: >- + Port number used by the listener inside Kafka. The + port number has to be unique within a given Kafka + cluster. Allowed port numbers are 9092 and higher + with the exception of ports 9404 and 9999, which + are already used for Prometheus and JMX. Depending + on the listener type, the port number might not be + the same as the port number that connects Kafka + clients. + type: type: string - description: The system property value. - description: >- - A map of additional system properties which will be - passed using the `-D` option to the JVM. - description: JVM Options for pods. - jmxOptions: - type: object - properties: - authentication: - type: object - properties: - type: - type: string - enum: - - password - description: >- - Authentication type. Currently the only supported - types are `password`.`password` type creates a - username and protected port with no TLS. - required: - - type - description: >- - Authentication configuration for connecting to the JMX - port. - description: JMX Options. - logging: - type: object - properties: - loggers: - x-kubernetes-preserve-unknown-fields: true - type: object - description: A Map from logger name to logger level. - type: - type: string - enum: - - inline - - external - description: 'Logging type, must be either ''inline'' or ''external''.' 
- valueFrom: - type: object - properties: - configMapKeyRef: - type: object - properties: - key: - type: string - name: - type: string - optional: - type: boolean - description: >- - Reference to the key in the ConfigMap containing the - configuration. - description: >- - `ConfigMap` entry where the logging configuration is - stored. - required: - - type - description: Logging configuration for Kafka Connect. - tracing: - type: object - properties: - type: - type: string - enum: - - jaeger - description: >- - Type of the tracing used. Currently the only supported - type is `jaeger` for Jaeger tracing. - required: - - type - description: The configuration of tracing in Kafka Connect. - template: - type: object - properties: - deployment: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can be - applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - description: Metadata applied to the resource. - deploymentStrategy: - type: string - enum: - - RollingUpdate - - Recreate - description: >- - DeploymentStrategy which will be used for this - Deployment. Valid values are `RollingUpdate` and - `Recreate`. Defaults to `RollingUpdate`. - description: Template for Kafka Connect `Deployment`. - pod: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can be - applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. 
- annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - description: Metadata applied to the resource. - imagePullSecrets: - type: array - items: + enum: + - internal + - route + - loadbalancer + - nodeport + - ingress + - cluster-ip + description: > + Type of the listener. Currently the supported + types are `internal`, `route`, `loadbalancer`, + `nodeport` and `ingress`. + + + * `internal` type exposes Kafka internally only + within the Kubernetes cluster. + + * `route` type uses OpenShift Routes to expose + Kafka. + + * `loadbalancer` type uses LoadBalancer type + services to expose Kafka. + + * `nodeport` type uses NodePort type services to + expose Kafka. + + * `ingress` type uses Kubernetes Nginx Ingress to + expose Kafka with TLS passthrough. + + * `cluster-ip` type uses a per-broker `ClusterIP` + service. + tls: + type: boolean + description: >- + Enables TLS encryption on the listener. This is a + required property. + authentication: type: object properties: - name: + accessTokenIsJwt: + type: boolean + description: >- + Configure whether the access token is treated + as JWT. This must be set to `false` if the + authorization server returns opaque tokens. + Defaults to `true`. + checkAccessTokenType: + type: boolean + description: >- + Configure whether the access token type check + is performed or not. This should be set to + `false` if the authorization server does not + include 'typ' claim in JWT token. Defaults to + `true`. + checkAudience: + type: boolean + description: >- + Enable or disable audience checking. Audience + checks identify the recipients of tokens. If + audience checking is enabled, the OAuth Client + ID also has to be configured using the + `clientId` property. 
The Kafka broker will + reject tokens that do not have its `clientId` + in their `aud` (audience) claim.Default value + is `false`. + checkIssuer: + type: boolean + description: >- + Enable or disable issuer checking. By default + issuer is checked using the value configured + by `validIssuerUri`. Default value is `true`. + clientAudience: + type: string + description: >- + The audience to use when making requests to + the authorization server's token endpoint. + Used for inter-broker authentication and for + configuring OAuth 2.0 over PLAIN using the + `clientId` and `secret` method. + clientId: + type: string + description: >- + OAuth Client ID which the Kafka broker can use + to authenticate against the authorization + server and use the introspect endpoint URI. + clientScope: + type: string + description: >- + The scope to use when making requests to the + authorization server's token endpoint. Used + for inter-broker authentication and for + configuring OAuth 2.0 over PLAIN using the + `clientId` and `secret` method. + clientSecret: + type: object + properties: + key: + type: string + description: >- + The key under which the secret value is + stored in the Kubernetes Secret. + secretName: + type: string + description: >- + The name of the Kubernetes Secret + containing the secret value. + required: + - key + - secretName + description: >- + Link to Kubernetes Secret containing the OAuth + client secret which the Kafka broker can use + to authenticate against the authorization + server and use the introspect endpoint URI. + connectTimeoutSeconds: + type: integer + description: >- + The connect timeout in seconds when connecting + to authorization server. If not set, the + effective connect timeout is 60 seconds. + customClaimCheck: type: string - description: >- - List of references to secrets in the same namespace - to use for pulling any of the images used by this - Pod. 
When the `STRIMZI_IMAGE_PULL_SECRETS` - environment variable in Cluster Operator and the - `imagePullSecrets` option are specified, only the - `imagePullSecrets` variable is used and the - `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored. - securityContext: - type: object - properties: - fsGroup: - type: integer - fsGroupChangePolicy: - type: string - runAsGroup: - type: integer - runAsNonRoot: - type: boolean - runAsUser: - type: integer - seLinuxOptions: - type: object - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - seccompProfile: - type: object - properties: - localhostProfile: - type: string - type: - type: string - supplementalGroups: - type: array - items: + description: >- + JsonPath filter query to be applied to the JWT + token or to the response of the introspection + endpoint for additional token validation. Not + set by default. + disableTlsHostnameVerification: + type: boolean + description: >- + Enable or disable TLS hostname verification. + Default value is `false`. + enableECDSA: + type: boolean + description: >- + Enable or disable ECDSA support by installing + BouncyCastle crypto provider. ECDSA support is + always enabled. The BouncyCastle libraries are + no longer packaged with Strimzi. Value is + ignored. + enableMetrics: + type: boolean + description: >- + Enable or disable OAuth metrics. Default value + is `false`. + enableOauthBearer: + type: boolean + description: >- + Enable or disable OAuth authentication over + SASL_OAUTHBEARER. Default value is `true`. + enablePlain: + type: boolean + description: >- + Enable or disable OAuth authentication over + SASL_PLAIN. There is no re-authentication + support when this mechanism is used. Default + value is `false`. + failFast: + type: boolean + description: >- + Enable or disable termination of Kafka broker + processes due to potentially recoverable + runtime errors during startup. Default value + is `true`. 
+ fallbackUserNameClaim: + type: string + description: >- + The fallback username claim to be used for the + user id if the claim specified by + `userNameClaim` is not present. This is useful + when `client_credentials` authentication only + results in the client id being provided in + another claim. It only takes effect if + `userNameClaim` is set. + fallbackUserNamePrefix: + type: string + description: >- + The prefix to use with the value of + `fallbackUserNameClaim` to construct the user + id. This only takes effect if + `fallbackUserNameClaim` is true, and the value + is present for the claim. Mapping usernames + and client ids into the same user id space is + useful in preventing name collisions. + groupsClaim: + type: string + description: >- + JsonPath query used to extract groups for the + user during authentication. Extracted groups + can be used by a custom authorizer. By default + no groups are extracted. + groupsClaimDelimiter: + type: string + description: >- + A delimiter used to parse groups when they are + extracted as a single String value rather than + a JSON array. Default value is ',' (comma). + httpRetries: type: integer - sysctls: - type: array - items: + description: >- + The maximum number of retries to attempt if an + initial HTTP request fails. If not set, the + default is to not attempt any retries. + httpRetryPauseMs: + type: integer + description: >- + The pause to take before retrying a failed + HTTP request. If not set, the default is to + not pause at all but to immediately repeat a + request. + introspectionEndpointUri: + type: string + description: >- + URI of the token introspection endpoint which + can be used to validate opaque non-JWT tokens. + jwksEndpointUri: + type: string + description: >- + URI of the JWKS certificate endpoint, which + can be used for local JWT validation. + jwksExpirySeconds: + type: integer + minimum: 1 + description: >- + Configures how often are the JWKS certificates + considered valid. 
The expiry interval has to + be at least 60 seconds longer then the refresh + interval specified in `jwksRefreshSeconds`. + Defaults to 360 seconds. + jwksIgnoreKeyUse: + type: boolean + description: >- + Flag to ignore the 'use' attribute of `key` + declarations in a JWKS endpoint response. + Default value is `false`. + jwksMinRefreshPauseSeconds: + type: integer + minimum: 0 + description: >- + The minimum pause between two consecutive + refreshes. When an unknown signing key is + encountered the refresh is scheduled + immediately, but will always wait for this + minimum pause. Defaults to 1 second. + jwksRefreshSeconds: + type: integer + minimum: 1 + description: >- + Configures how often are the JWKS certificates + refreshed. The refresh interval has to be at + least 60 seconds shorter then the expiry + interval specified in `jwksExpirySeconds`. + Defaults to 300 seconds. + listenerConfig: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Configuration to be used for a specific + listener. All values are prefixed with + listener.name.__. + maxSecondsWithoutReauthentication: + type: integer + description: >- + Maximum number of seconds the authenticated + session remains valid without + re-authentication. This enables Apache Kafka + re-authentication feature, and causes sessions + to expire when the access token expires. If + the access token expires before max time or if + max time is reached, the client has to + re-authenticate, otherwise the server will + drop the connection. Not set by default - the + authenticated session does not expire when the + access token expires. This option only applies + to SASL_OAUTHBEARER authentication mechanism + (when `enableOauthBearer` is `true`). + readTimeoutSeconds: + type: integer + description: >- + The read timeout in seconds when connecting to + authorization server. If not set, the + effective read timeout is 60 seconds. 
+ sasl: + type: boolean + description: Enable or disable SASL on this listener. + secrets: + type: array + items: + type: object + properties: + key: + type: string + description: >- + The key under which the secret value is + stored in the Kubernetes Secret. + secretName: + type: string + description: >- + The name of the Kubernetes Secret + containing the secret value. + required: + - key + - secretName + description: >- + Secrets to be mounted to + /opt/kafka/custom-authn-secrets/custom-listener-_-_/__. + tlsTrustedCertificates: + type: array + items: + type: object + properties: + certificate: + type: string + description: >- + The name of the file certificate in the + Secret. + secretName: + type: string + description: >- + The name of the Secret containing the + certificate. + required: + - certificate + - secretName + description: >- + Trusted certificates for TLS connection to the + OAuth server. + tokenEndpointUri: + type: string + description: >- + URI of the Token Endpoint to use with + SASL_PLAIN mechanism when the client + authenticates with `clientId` and a `secret`. + If set, the client can authenticate over + SASL_PLAIN by either setting `username` to + `clientId`, and setting `password` to client + `secret`, or by setting `username` to account + username, and `password` to access token + prefixed with `$accessToken:`. If this option + is not set, the `password` is always + interpreted as an access token (without a + prefix), and `username` as the account + username (a so called 'no-client-credentials' + mode). + type: + type: string + enum: + - tls + - scram-sha-512 + - oauth + - custom + description: >- + Authentication type. `oauth` type uses SASL + OAUTHBEARER Authentication. `scram-sha-512` + type uses SASL SCRAM-SHA-512 Authentication. + `tls` type uses TLS Client Authentication. + `tls` type is supported only on TLS + listeners.`custom` type allows for any + authentication type to be used. 
+ userInfoEndpointUri: + type: string + description: >- + URI of the User Info Endpoint to use as a + fallback to obtaining the user id when the + Introspection Endpoint does not return + information that can be used for the user id. + userNameClaim: + type: string + description: >- + Name of the claim from the JWT authentication + token, Introspection Endpoint response or User + Info Endpoint response which will be used to + extract the user id. Defaults to `sub`. + validIssuerUri: + type: string + description: >- + URI of the token issuer used for + authentication. + validTokenType: + type: string + description: >- + Valid value for the `token_type` attribute + returned by the Introspection Endpoint. No + default value, and not checked by default. + required: + - type + description: Authentication configuration for this listener. + configuration: + type: object + properties: + brokerCertChainAndKey: type: object properties: - name: + certificate: type: string - value: + description: >- + The name of the file certificate in the + Secret. + key: type: string - windowsOptions: - type: object - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: + description: The name of the private key in the Secret. + secretName: + type: string + description: >- + The name of the Secret containing the + certificate. + required: + - certificate + - key + - secretName + description: >- + Reference to the `Secret` which holds the + certificate and private key pair which will be + used for this listener. The certificate can + optionally contain the whole chain. This field + can be used only with listeners with enabled + TLS encryption. + externalTrafficPolicy: + type: string + enum: + - Local + - Cluster + description: >- + Specifies whether the service routes external + traffic to node-local or cluster-wide + endpoints. `Cluster` may cause a second hop to + another node and obscures the client source + IP. 
`Local` avoids a second hop for + LoadBalancer and Nodeport type services and + preserves the client source IP (when supported + by the infrastructure). If unspecified, + Kubernetes will use `Cluster` as the + default.This field can be used only with + `loadbalancer` or `nodeport` type listener. + loadBalancerSourceRanges: + type: array + items: type: string - description: >- - Configures pod-level security attributes and common - container settings. - terminationGracePeriodSeconds: - type: integer - minimum: 0 - description: >- - The grace period is the duration in seconds after - the processes running in the pod are sent a - termination signal, and the time when the processes - are forcibly halted with a kill signal. Set this - value to longer than the expected cleanup time for - your process. Value must be a non-negative integer. - A zero value indicates delete immediately. You might - need to increase the grace period for very large - Kafka clusters, so that the Kafka brokers have - enough time to transfer their work to another broker - before they are terminated. Defaults to 30 seconds. 
- affinity: - type: object - properties: - nodeAffinity: - type: object - properties: - preferredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - preference: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchFields: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - weight: - type: integer - requiredDuringSchedulingIgnoredDuringExecution: - type: object - properties: - nodeSelectorTerms: - type: array - items: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchFields: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - podAffinity: - type: object - properties: - preferredDuringSchedulingIgnoredDuringExecution: - type: array - items: + description: >- + A list of CIDR ranges (for example + `10.0.0.0/8` or `130.211.204.1/32`) from which + clients can connect to load balancer type + listeners. If supported by the platform, + traffic through the loadbalancer is restricted + to the specified CIDR ranges. This field is + applicable only for loadbalancer type services + and is ignored if the cloud provider does not + support the feature. This field can be used + only with `loadbalancer` type listener. + bootstrap: + type: object + properties: + alternativeNames: + type: array + items: + type: string + description: >- + Additional alternative names for the + bootstrap service. The alternative names + will be added to the list of subject + alternative names of the TLS certificates. 
+ host: + type: string + description: >- + The bootstrap host. This field will be + used in the Ingress resource or in the + Route resource to specify the desired + hostname. This field can be used only with + `route` (optional) or `ingress` (required) + type listeners. + nodePort: + type: integer + description: >- + Node port for the bootstrap service. This + field can be used only with `nodeport` + type listener. + loadBalancerIP: + type: string + description: >- + The loadbalancer is requested with the IP + address specified in this field. This + feature depends on whether the underlying + cloud provider supports specifying the + `loadBalancerIP` when a load balancer is + created. This field is ignored if the + cloud provider does not support the + feature.This field can be used only with + `loadbalancer` type listener. + annotations: + x-kubernetes-preserve-unknown-fields: true type: object - properties: - podAffinityTerm: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - weight: - type: integer - requiredDuringSchedulingIgnoredDuringExecution: - type: array - items: + description: >- + Annotations that will be added to the + `Ingress`, `Route`, or `Service` resource. + You can use this field to configure DNS + providers such as External DNS. This field + can be used only with `loadbalancer`, + `nodeport`, `route`, or `ingress` type + listeners. 
+ labels: + x-kubernetes-preserve-unknown-fields: true type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - podAntiAffinity: + description: >- + Labels that will be added to the + `Ingress`, `Route`, or `Service` resource. + This field can be used only with + `loadbalancer`, `nodeport`, `route`, or + `ingress` type listeners. + description: Bootstrap configuration. + brokers: + type: array + items: + type: object + properties: + broker: + type: integer + description: >- + ID of the kafka broker (broker + identifier). Broker IDs start from 0 and + correspond to the number of broker + replicas. + advertisedHost: + type: string + description: >- + The host name which will be used in the + brokers' `advertised.brokers`. + advertisedPort: + type: integer + description: >- + The port number which will be used in + the brokers' `advertised.brokers`. + host: + type: string + description: >- + The broker host. This field will be used + in the Ingress resource or in the Route + resource to specify the desired + hostname. This field can be used only + with `route` (optional) or `ingress` + (required) type listeners. + nodePort: + type: integer + description: >- + Node port for the per-broker service. + This field can be used only with + `nodeport` type listener. + loadBalancerIP: + type: string + description: >- + The loadbalancer is requested with the + IP address specified in this field. This + feature depends on whether the + underlying cloud provider supports + specifying the `loadBalancerIP` when a + load balancer is created. 
This field is + ignored if the cloud provider does not + support the feature.This field can be + used only with `loadbalancer` type + listener. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations that will be added to the + `Ingress` or `Service` resource. You can + use this field to configure DNS + providers such as External DNS. This + field can be used only with + `loadbalancer`, `nodeport`, or `ingress` + type listeners. + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels that will be added to the + `Ingress`, `Route`, or `Service` + resource. This field can be used only + with `loadbalancer`, `nodeport`, + `route`, or `ingress` type listeners. + required: + - broker + description: Per-broker configurations. + ipFamilyPolicy: + type: string + enum: + - SingleStack + - PreferDualStack + - RequireDualStack + description: >- + Specifies the IP Family Policy used by the + service. Available options are `SingleStack`, + `PreferDualStack` and `RequireDualStack`. + `SingleStack` is for a single IP family. + `PreferDualStack` is for two IP families on + dual-stack configured clusters or a single IP + family on single-stack clusters. + `RequireDualStack` fails unless there are two + IP families on dual-stack configured clusters. + If unspecified, Kubernetes will choose the + default value based on the service type. + Available on Kubernetes 1.20 and newer. + ipFamilies: + type: array + items: + type: string + enum: + - IPv4 + - IPv6 + description: >- + Specifies the IP Families used by the service. + Available options are `IPv4` and `IPv6. If + unspecified, Kubernetes will choose the + default value based on the `ipFamilyPolicy` + setting. Available on Kubernetes 1.20 and + newer. + createBootstrapService: + type: boolean + description: >- + Whether to create the bootstrap service or + not. The bootstrap service is created by + default (if not specified differently). 
This + field can be used with the `loadBalancer` type + listener. + class: + type: string + description: >- + Configures a specific class for `Ingress` and + `LoadBalancer` that defines which controller + will be used. This field can only be used with + `ingress` and `loadbalancer` type listeners. + If not specified, the default controller is + used. For an `ingress` listener, set the + `ingressClassName` property in the `Ingress` + resources. For a `loadbalancer` listener, set + the `loadBalancerClass` property in the + `Service` resources. + finalizers: + type: array + items: + type: string + description: >- + A list of finalizers which will be configured + for the `LoadBalancer` type Services created + for this listener. If supported by the + platform, the finalizer + `service.kubernetes.io/load-balancer-cleanup` + to make sure that the external load balancer + is deleted together with the service.For more + information, see + https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#garbage-collecting-load-balancers. + This field can be used only with + `loadbalancer` type listeners. + maxConnectionCreationRate: + type: integer + description: >- + The maximum connection creation rate we allow + in this listener at any time. New connections + will be throttled if the limit is reached. + maxConnections: + type: integer + description: >- + The maximum number of connections we allow for + this listener in the broker at any time. New + connections are blocked if the limit is + reached. + preferredNodePortAddressType: + type: string + enum: + - ExternalIP + - ExternalDNS + - InternalIP + - InternalDNS + - Hostname + description: >- + Defines which address type should be used as + the node address. Available types are: + `ExternalDNS`, `ExternalIP`, `InternalDNS`, + `InternalIP` and `Hostname`. 
By default, the + addresses will be used in the following order + (the first one found will be used): + + + * `ExternalDNS` + + * `ExternalIP` + + * `InternalDNS` + + * `InternalIP` + + * `Hostname` + + + This field is used to select the preferred + address type, which is checked first. If no + address is found for this address type, the + other types are checked in the default order. + This field can only be used with `nodeport` + type listener. + useServiceDnsDomain: + type: boolean + description: >- + Configures whether the Kubernetes service DNS + domain should be used or not. If set to + `true`, the generated addresses will contain + the service DNS domain suffix (by default + `.cluster.local`, can be configured using + environment variable + `KUBERNETES_SERVICE_DNS_DOMAIN`). Defaults to + `false`.This field can be used only with + `internal` and `cluster-ip` type listeners. + description: Additional listener configuration. + networkPolicyPeers: + type: array + items: type: object properties: - preferredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - podAffinityTerm: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: + ipBlock: + type: object + properties: + cidr: + type: string + except: + type: array + items: + type: string + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: type: array items: type: string - topologyKey: - type: string - weight: - type: integer - requiredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - labelSelector: + matchLabels: + 
x-kubernetes-preserve-unknown-fields: true + type: object + podSelector: + type: object + properties: + matchExpressions: + type: array + items: type: object properties: - matchExpressions: + key: + type: string + operator: + type: string + values: type: array items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - description: The pod's affinity rules. - tolerations: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + List of peers which should be able to connect to + this listener. Peers in this list are combined + using a logical OR operation. If this field is + empty or missing, all connections will be allowed + for this listener. If this field is present and + contains at least one item, the listener only + allows the traffic which matches at least one item + in this list. + required: + - name + - port + - type + - tls + description: Configures listeners of Kafka brokers. + config: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Kafka broker config properties with the following + prefixes cannot be set: listeners, advertised., broker., + listener., host.name, port, inter.broker.listener.name, + sasl., ssl., security., password., log.dir, + zookeeper.connect, zookeeper.set.acl, zookeeper.ssl, + zookeeper.clientCnxnSocket, authorizer., super.user, + cruise.control.metrics.topic, + cruise.control.metrics.reporter.bootstrap.servers,node.id, + process.roles, controller. 
(with the exception of: + zookeeper.connection.timeout.ms, + sasl.server.max.receive.size,ssl.cipher.suites, + ssl.protocol, ssl.enabled.protocols, + ssl.secure.random.implementation,cruise.control.metrics.topic.num.partitions, + cruise.control.metrics.topic.replication.factor, + cruise.control.metrics.topic.retention.ms,cruise.control.metrics.topic.auto.create.retries, + cruise.control.metrics.topic.auto.create.timeout.ms,cruise.control.metrics.topic.min.insync.replicas,controller.quorum.election.backoff.max.ms, + controller.quorum.election.timeout.ms, + controller.quorum.fetch.timeout.ms). + storage: + type: object + properties: + class: + type: string + description: >- + The storage class to use for dynamic volume + allocation. + deleteClaim: + type: boolean + description: >- + Specifies if the persistent volume claim has to be + deleted when the cluster is un-deployed. + id: + type: integer + minimum: 0 + description: >- + Storage identification number. It is mandatory only + for storage volumes defined in a storage of type + 'jbod'. + overrides: type: array items: type: object properties: - effect: - type: string - key: + class: type: string - operator: + description: >- + The storage class to use for dynamic volume + allocation for this broker. + broker: + type: integer + description: Id of the kafka broker (broker identifier). + description: >- + Overrides for individual brokers. The `overrides` + field allows to specify a different configuration + for different brokers. + selector: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Specifies a specific persistent volume to use. It + contains key:value pairs representing labels for + selecting such a volume. + size: + type: string + description: >- + When type=persistent-claim, defines the size of the + persistent volume claim (i.e 1Gi). Mandatory when + type=persistent-claim. 
+ sizeLimit: + type: string + pattern: '^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$' + description: >- + When type=ephemeral, defines the total amount of + local storage required for this EmptyDir volume (for + example 1Gi). + type: + type: string + enum: + - ephemeral + - persistent-claim + - jbod + description: >- + Storage type, must be either 'ephemeral', + 'persistent-claim', or 'jbod'. + volumes: + type: array + items: + type: object + properties: + class: type: string - tolerationSeconds: + description: >- + The storage class to use for dynamic volume + allocation. + deleteClaim: + type: boolean + description: >- + Specifies if the persistent volume claim has + to be deleted when the cluster is un-deployed. + id: type: integer - value: + minimum: 0 + description: >- + Storage identification number. It is mandatory + only for storage volumes defined in a storage + of type 'jbod'. + overrides: + type: array + items: + type: object + properties: + class: + type: string + description: >- + The storage class to use for dynamic + volume allocation for this broker. + broker: + type: integer + description: >- + Id of the kafka broker (broker + identifier). + description: >- + Overrides for individual brokers. The + `overrides` field allows to specify a + different configuration for different brokers. + selector: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Specifies a specific persistent volume to use. + It contains key:value pairs representing + labels for selecting such a volume. + size: type: string - description: The pod's tolerations. - priorityClassName: + description: >- + When type=persistent-claim, defines the size + of the persistent volume claim (i.e 1Gi). + Mandatory when type=persistent-claim. + sizeLimit: + type: string + pattern: '^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$' + description: >- + When type=ephemeral, defines the total amount + of local storage required for this EmptyDir + volume (for example 1Gi). 
+ type: + type: string + enum: + - ephemeral + - persistent-claim + description: >- + Storage type, must be either 'ephemeral' or + 'persistent-claim'. + required: + - type + description: >- + List of volumes as Storage objects representing the + JBOD disks array. + required: + - type + description: Storage configuration (disk). Cannot be updated. + authorization: + type: object + properties: + allowOnError: + type: boolean + description: >- + Defines whether a Kafka client should be allowed or + denied by default when the authorizer fails to query + the Open Policy Agent, for example, when it is + temporarily unavailable). Defaults to `false` - all + actions will be denied. + authorizerClass: + type: string + description: >- + Authorization implementation class, which must be + available in classpath. + clientId: type: string description: >- - The name of the priority class used to assign - priority to the pods. For more information about - priority classes, see {K8sPriorityClass}. - schedulerName: - type: string + OAuth Client ID which the Kafka client can use to + authenticate against the OAuth server and use the + token endpoint URI. + connectTimeoutSeconds: + type: integer + minimum: 1 description: >- - The name of the scheduler used to dispatch this - `Pod`. If not specified, the default scheduler will - be used. - hostAliases: + The connect timeout in seconds when connecting to + authorization server. If not set, the effective + connect timeout is 60 seconds. + delegateToKafkaAcls: + type: boolean + description: >- + Whether authorization decision should be delegated + to the 'Simple' authorizer if DENIED by Keycloak + Authorization Services policies. Default value is + `false`. + disableTlsHostnameVerification: + type: boolean + description: >- + Enable or disable TLS hostname verification. Default + value is `false`. + enableMetrics: + type: boolean + description: >- + Enable or disable OAuth metrics. Default value is + `false`. 
+ expireAfterMs: + type: integer + description: >- + The expiration of the records kept in the local + cache to avoid querying the Open Policy Agent for + every request. Defines how often the cached + authorization decisions are reloaded from the Open + Policy Agent server. In milliseconds. Defaults to + `3600000`. + grantsRefreshPeriodSeconds: + type: integer + minimum: 0 + description: >- + The time between two consecutive grants refresh runs + in seconds. The default value is 60. + grantsRefreshPoolSize: + type: integer + minimum: 1 + description: >- + The number of threads to use to refresh grants for + active sessions. The more threads, the more + parallelism, so the sooner the job completes. + However, using more threads places a heavier load on + the authorization server. The default value is 5. + httpRetries: + type: integer + minimum: 0 + description: >- + The maximum number of retries to attempt if an + initial HTTP request fails. If not set, the default + is to not attempt any retries. + initialCacheCapacity: + type: integer + description: >- + Initial capacity of the local cache used by the + authorizer to avoid querying the Open Policy Agent + for every request Defaults to `5000`. + maximumCacheSize: + type: integer + description: >- + Maximum capacity of the local cache used by the + authorizer to avoid querying the Open Policy Agent + for every request. Defaults to `50000`. + readTimeoutSeconds: + type: integer + minimum: 1 + description: >- + The read timeout in seconds when connecting to + authorization server. If not set, the effective read + timeout is 60 seconds. + superUsers: type: array items: - type: object - properties: - hostnames: - type: array - items: - type: string - ip: - type: string + type: string description: >- - The pod's HostAliases. HostAliases is an optional - list of hosts and IPs that will be injected into the - Pod's hosts file if specified. 
- enableServiceLinks: + List of super users, which are user principals with + unlimited access rights. + supportsAdminApi: type: boolean description: >- - Indicates whether information about services should - be injected into Pod's environment variables. - topologySpreadConstraints: + Indicates whether the custom authorizer supports the + APIs for managing ACLs using the Kafka Admin API. + Defaults to `false`. + tlsTrustedCertificates: type: array items: type: object properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - maxSkew: - type: integer - topologyKey: + certificate: type: string - whenUnsatisfiable: + description: >- + The name of the file certificate in the + Secret. + secretName: type: string - description: The pod's topology spread constraints. - description: Template for Kafka Connect `Pods`. - apiService: + description: >- + The name of the Secret containing the + certificate. + required: + - certificate + - secretName + description: >- + Trusted certificates for TLS connection to the OAuth + server. + tokenEndpointUri: + type: string + description: Authorization server token endpoint URI. + type: + type: string + enum: + - simple + - opa + - keycloak + - custom + description: >- + Authorization type. Currently, the supported types + are `simple`, `keycloak`, `opa` and `custom`. + `simple` authorization type uses Kafka's + `kafka.security.authorizer.AclAuthorizer` class for + authorization. `keycloak` authorization type uses + Keycloak Authorization Services for authorization. + `opa` authorization type uses Open Policy Agent + based authorization.`custom` authorization type uses + user-provided implementation for authorization. 
+ url: + type: string + example: 'http://opa:8181/v1/data/kafka/authz/allow' + description: >- + The URL used to connect to the Open Policy Agent + server. The URL has to include the policy which will + be queried by the authorizer. This option is + required. + required: + - type + description: Authorization configuration for Kafka brokers. + rack: type: object properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can be - applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - description: Metadata applied to the resource. - ipFamilyPolicy: + topologyKey: type: string - enum: - - SingleStack - - PreferDualStack - - RequireDualStack + example: topology.kubernetes.io/zone + description: >- + A key that matches labels assigned to the Kubernetes + cluster nodes. The value of the label is used to set + a broker's `broker.rack` config, and the + `client.rack` config for Kafka Connect or + MirrorMaker 2.0. + required: + - topologyKey + description: Configuration of the `broker.rack` broker config. + brokerRackInitImage: + type: string + description: >- + The image of the init container used for initializing + the `broker.rack`. + livenessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive failures for the probe to be + considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: >- + The initial delay before first the health is first + checked. Default to 15 seconds. Minimum value is 0. 
+ periodSeconds: + type: integer + minimum: 1 + description: >- + How often (in seconds) to perform the probe. Default + to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive successes for the probe to be + considered successful after having failed. Defaults + to 1. Must be 1 for liveness. Minimum value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: >- + The timeout for each attempted health check. Default + to 5 seconds. Minimum value is 1. + description: Pod liveness checking. + readinessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive failures for the probe to be + considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: >- + The initial delay before first the health is first + checked. Default to 15 seconds. Minimum value is 0. + periodSeconds: + type: integer + minimum: 1 description: >- - Specifies the IP Family Policy used by the service. - Available options are `SingleStack`, - `PreferDualStack` and `RequireDualStack`. - `SingleStack` is for a single IP family. - `PreferDualStack` is for two IP families on - dual-stack configured clusters or a single IP family - on single-stack clusters. `RequireDualStack` fails - unless there are two IP families on dual-stack - configured clusters. If unspecified, Kubernetes will - choose the default value based on the service type. - Available on Kubernetes 1.20 and newer. - ipFamilies: - type: array - items: - type: string - enum: - - IPv4 - - IPv6 + How often (in seconds) to perform the probe. Default + to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 description: >- - Specifies the IP Families used by the service. - Available options are `IPv4` and `IPv6. 
If - unspecified, Kubernetes will choose the default - value based on the `ipFamilyPolicy` setting. - Available on Kubernetes 1.20 and newer. - description: Template for Kafka Connect API `Service`. - buildConfig: + Minimum consecutive successes for the probe to be + considered successful after having failed. Defaults + to 1. Must be 1 for liveness. Minimum value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: >- + The timeout for each attempted health check. Default + to 5 seconds. Minimum value is 1. + description: Pod readiness checking. + jvmOptions: type: object properties: - metadata: + '-XX': + x-kubernetes-preserve-unknown-fields: true type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can be - applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - description: Metadata applied to the resource. - description: >- - Template for the Kafka Connect BuildConfig used to build - new container images. The BuildConfig is used only on - OpenShift. - buildContainer: - type: object - properties: - env: + description: A map of -XX options to the JVM. + '-Xms': + type: string + pattern: '^[0-9]+[mMgG]?$' + description: '-Xms option to to the JVM.' + '-Xmx': + type: string + pattern: '^[0-9]+[mMgG]?$' + description: '-Xmx option to to the JVM.' + gcLoggingEnabled: + type: boolean + description: >- + Specifies whether the Garbage Collection logging is + enabled. The default is false. + javaSystemProperties: type: array items: type: object properties: name: type: string - description: The environment variable key. + description: The system property name. 
value: type: string - description: The environment variable value. + description: The system property value. description: >- - Environment variables which should be applied to the - container. - securityContext: + A map of additional system properties which will be + passed using the `-D` option to the JVM. + description: JVM Options for pods. + jmxOptions: + type: object + properties: + authentication: type: object properties: - allowPrivilegeEscalation: - type: boolean - capabilities: - type: object - properties: - add: - type: array - items: - type: string - drop: - type: array - items: - type: string - privileged: - type: boolean - procMount: + type: type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - type: integer - runAsNonRoot: - type: boolean - runAsUser: - type: integer - seLinuxOptions: - type: object - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - seccompProfile: - type: object - properties: - localhostProfile: - type: string - type: - type: string - windowsOptions: - type: object - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - description: Security context for the container. - description: >- - Template for the Kafka Connect Build container. The - build container is used only on Kubernetes. - buildPod: + enum: + - password + description: >- + Authentication type. Currently the only + supported types are `password`.`password` type + creates a username and protected port with no + TLS. + required: + - type + description: >- + Authentication configuration for connecting to the + JMX port. + description: JMX Options for Kafka brokers. + resources: type: object properties: - metadata: + limits: + x-kubernetes-preserve-unknown-fields: true + type: object + requests: + x-kubernetes-preserve-unknown-fields: true + type: object + description: CPU and memory resources to reserve. 
+ metricsConfig: + type: object + properties: + type: + type: string + enum: + - jmxPrometheusExporter + description: >- + Metrics type. Only 'jmxPrometheusExporter' supported + currently. + valueFrom: type: object properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can be - applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true + configMapKeyRef: type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean description: >- - Annotations added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - description: Metadata applied to the resource. - imagePullSecrets: - type: array - items: - type: object - properties: - name: - type: string + Reference to the key in the ConfigMap containing + the configuration. description: >- - List of references to secrets in the same namespace - to use for pulling any of the images used by this - Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` - environment variable in Cluster Operator and the - `imagePullSecrets` option are specified, only the - `imagePullSecrets` variable is used and the - `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored. - securityContext: + ConfigMap entry where the Prometheus JMX Exporter + configuration is stored. For details of the + structure of this configuration, see the + {JMXExporter}. + required: + - type + - valueFrom + description: Metrics configuration. + logging: + type: object + properties: + loggers: + x-kubernetes-preserve-unknown-fields: true type: object - properties: - fsGroup: - type: integer - fsGroupChangePolicy: - type: string - runAsGroup: - type: integer - runAsNonRoot: - type: boolean - runAsUser: - type: integer - seLinuxOptions: + description: A Map from logger name to logger level. 
+ type: + type: string + enum: + - inline + - external + description: 'Logging type, must be either ''inline'' or ''external''.' + valueFrom: + type: object + properties: + configMapKeyRef: type: object properties: - level: - type: string - role: - type: string - type: + key: type: string - user: + name: type: string - seccompProfile: + optional: + type: boolean + description: >- + Reference to the key in the ConfigMap containing + the configuration. + description: >- + `ConfigMap` entry where the logging configuration is + stored. + required: + - type + description: Logging configuration for Kafka. + template: + type: object + properties: + statefulset: + type: object + properties: + metadata: type: object properties: - localhostProfile: - type: string - type: - type: string - supplementalGroups: - type: array - items: - type: integer - sysctls: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + podManagementPolicy: + type: string + enum: + - OrderedReady + - Parallel + description: >- + PodManagementPolicy which will be used for this + StatefulSet. Valid values are `Parallel` and + `OrderedReady`. Defaults to `Parallel`. + description: Template for Kafka `StatefulSet`. + pod: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. 
+ annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + imagePullSecrets: type: array items: type: object properties: name: type: string - value: - type: string - windowsOptions: + description: >- + List of references to secrets in the same + namespace to use for pulling any of the images + used by this Pod. When the + `STRIMZI_IMAGE_PULL_SECRETS` environment + variable in Cluster Operator and the + `imagePullSecrets` option are specified, only + the `imagePullSecrets` variable is used and the + `STRIMZI_IMAGE_PULL_SECRETS` variable is + ignored. + securityContext: type: object properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: + fsGroup: + type: integer + fsGroupChangePolicy: type: string - description: >- - Configures pod-level security attributes and common - container settings. - terminationGracePeriodSeconds: - type: integer - minimum: 0 - description: >- - The grace period is the duration in seconds after - the processes running in the pod are sent a - termination signal, and the time when the processes - are forcibly halted with a kill signal. Set this - value to longer than the expected cleanup time for - your process. Value must be a non-negative integer. - A zero value indicates delete immediately. You might - need to increase the grace period for very large - Kafka clusters, so that the Kafka brokers have - enough time to transfer their work to another broker - before they are terminated. Defaults to 30 seconds. 
- affinity: - type: object - properties: - nodeAffinity: - type: object - properties: - preferredDuringSchedulingIgnoredDuringExecution: + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + supplementalGroups: + type: array + items: + type: integer + sysctls: type: array items: type: object properties: - preference: + name: + type: string + value: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: >- + Configures pod-level security attributes and + common container settings. + terminationGracePeriodSeconds: + type: integer + minimum: 0 + description: >- + The grace period is the duration in seconds + after the processes running in the pod are sent + a termination signal, and the time when the + processes are forcibly halted with a kill + signal. Set this value to longer than the + expected cleanup time for your process. Value + must be a non-negative integer. A zero value + indicates delete immediately. You might need to + increase the grace period for very large Kafka + clusters, so that the Kafka brokers have enough + time to transfer their work to another broker + before they are terminated. Defaults to 30 + seconds. 
+ affinity: + type: object + properties: + nodeAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: type: object properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchFields: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - weight: - type: integer - requiredDuringSchedulingIgnoredDuringExecution: + preference: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: object + properties: + nodeSelectorTerms: + type: array + items: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + podAffinity: type: object properties: - nodeSelectorTerms: + preferredDuringSchedulingIgnoredDuringExecution: type: array items: type: object properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchFields: - type: array - items: - type: object - properties: - key: - type: string - operator: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + 
properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: type: string - values: - type: array - items: - type: string - podAffinity: - type: object - properties: - preferredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - podAffinityTerm: + topologyKey: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: type: object properties: labelSelector: @@ -13303,53 +10635,91 @@ spec: matchLabels: x-kubernetes-preserve-unknown-fields: true type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object namespaces: type: array items: type: string topologyKey: type: string - weight: - type: integer - requiredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - labelSelector: + podAntiAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: type: object properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true + podAffinityTerm: type: 
object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - podAntiAffinity: - type: object - properties: - preferredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - podAffinityTerm: + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: type: object properties: labelSelector: @@ -13371,1963 +10741,2701 @@ spec: matchLabels: x-kubernetes-preserve-unknown-fields: true type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object namespaces: type: array items: type: string topologyKey: type: string - weight: - type: integer - requiredDuringSchedulingIgnoredDuringExecution: - type: array - items: + description: The pod's affinity rules. + tolerations: + type: array + items: + type: object + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + type: integer + value: + type: string + description: The pod's tolerations. 
+ priorityClassName: + type: string + description: >- + The name of the priority class used to assign + priority to the pods. For more information about + priority classes, see {K8sPriorityClass}. + schedulerName: + type: string + description: >- + The name of the scheduler used to dispatch this + `Pod`. If not specified, the default scheduler + will be used. + hostAliases: + type: array + items: + type: object + properties: + hostnames: + type: array + items: + type: string + ip: + type: string + description: >- + The pod's HostAliases. HostAliases is an + optional list of hosts and IPs that will be + injected into the Pod's hosts file if specified. + tmpDirSizeLimit: + type: string + pattern: '^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$' + description: >- + Defines the total amount (for example `1Gi`) of + local storage required for temporary EmptyDir + volume (`/tmp`). Default value is `5Mi`. + enableServiceLinks: + type: boolean + description: >- + Indicates whether information about services + should be injected into Pod's environment + variables. + topologySpreadConstraints: + type: array + items: + type: object + properties: + labelSelector: type: object properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: + matchExpressions: type: array items: - type: string - topologyKey: - type: string - description: The pod's affinity rules. - tolerations: - type: array - items: - type: object - properties: - effect: - type: string - key: - type: string - operator: - type: string - tolerationSeconds: - type: integer - value: - type: string - description: The pod's tolerations. - priorityClassName: - type: string - description: >- - The name of the priority class used to assign - priority to the pods. 
For more information about - priority classes, see {K8sPriorityClass}. - schedulerName: - type: string - description: >- - The name of the scheduler used to dispatch this - `Pod`. If not specified, the default scheduler will - be used. - hostAliases: - type: array - items: - type: object - properties: - hostnames: - type: array - items: - type: string - ip: - type: string - description: >- - The pod's HostAliases. HostAliases is an optional - list of hosts and IPs that will be injected into the - Pod's hosts file if specified. - enableServiceLinks: - type: boolean - description: >- - Indicates whether information about services should - be injected into Pod's environment variables. - topologySpreadConstraints: - type: array - items: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + matchLabelKeys: type: array items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - maxSkew: - type: integer - topologyKey: - type: string - whenUnsatisfiable: - type: string - description: The pod's topology spread constraints. - description: >- - Template for Kafka Connect Build `Pods`. The build pod - is used only on Kubernetes. - clusterRoleBinding: - type: object - properties: - metadata: + type: string + maxSkew: + type: integer + minDomains: + type: integer + nodeAffinityPolicy: + type: string + nodeTaintsPolicy: + type: string + topologyKey: + type: string + whenUnsatisfiable: + type: string + description: The pod's topology spread constraints. + description: Template for Kafka `Pods`. 
+ bootstrapService: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + ipFamilyPolicy: + type: string + enum: + - SingleStack + - PreferDualStack + - RequireDualStack + description: >- + Specifies the IP Family Policy used by the + service. Available options are `SingleStack`, + `PreferDualStack` and `RequireDualStack`. + `SingleStack` is for a single IP family. + `PreferDualStack` is for two IP families on + dual-stack configured clusters or a single IP + family on single-stack clusters. + `RequireDualStack` fails unless there are two IP + families on dual-stack configured clusters. If + unspecified, Kubernetes will choose the default + value based on the service type. Available on + Kubernetes 1.20 and newer. + ipFamilies: + type: array + items: + type: string + enum: + - IPv4 + - IPv6 + description: >- + Specifies the IP Families used by the service. + Available options are `IPv4` and `IPv6. If + unspecified, Kubernetes will choose the default + value based on the `ipFamilyPolicy` setting. + Available on Kubernetes 1.20 and newer. + description: Template for Kafka bootstrap `Service`. + brokersService: type: object properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can be - applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. 
- annotations: - x-kubernetes-preserve-unknown-fields: true + metadata: type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + ipFamilyPolicy: + type: string + enum: + - SingleStack + - PreferDualStack + - RequireDualStack description: >- - Annotations added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - description: Metadata applied to the resource. - description: Template for the Kafka Connect ClusterRoleBinding. - connectContainer: - type: object - properties: - env: - type: array - items: - type: object - properties: - name: - type: string - description: The environment variable key. - value: + Specifies the IP Family Policy used by the + service. Available options are `SingleStack`, + `PreferDualStack` and `RequireDualStack`. + `SingleStack` is for a single IP family. + `PreferDualStack` is for two IP families on + dual-stack configured clusters or a single IP + family on single-stack clusters. + `RequireDualStack` fails unless there are two IP + families on dual-stack configured clusters. If + unspecified, Kubernetes will choose the default + value based on the service type. Available on + Kubernetes 1.20 and newer. + ipFamilies: + type: array + items: type: string - description: The environment variable value. - description: >- - Environment variables which should be applied to the - container. 
- securityContext: + enum: + - IPv4 + - IPv6 + description: >- + Specifies the IP Families used by the service. + Available options are `IPv4` and `IPv6. If + unspecified, Kubernetes will choose the default + value based on the `ipFamilyPolicy` setting. + Available on Kubernetes 1.20 and newer. + description: Template for Kafka broker `Service`. + externalBootstrapService: type: object properties: - allowPrivilegeEscalation: - type: boolean - capabilities: - type: object - properties: - add: - type: array - items: - type: string - drop: - type: array - items: - type: string - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - type: integer - runAsNonRoot: - type: boolean - runAsUser: - type: integer - seLinuxOptions: + metadata: type: object properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - seccompProfile: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + description: Template for Kafka external bootstrap `Service`. + perPodService: + type: object + properties: + metadata: type: object properties: - localhostProfile: - type: string - type: - type: string - windowsOptions: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. 
+ annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + description: >- + Template for Kafka per-pod `Services` used for + access from outside of Kubernetes. + externalBootstrapRoute: + type: object + properties: + metadata: type: object properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - description: Security context for the container. - description: Template for the Kafka Connect container. - initContainer: - type: object - properties: - env: - type: array - items: - type: object - properties: - name: - type: string - description: The environment variable key. - value: - type: string - description: The environment variable value. - description: >- - Environment variables which should be applied to the - container. - securityContext: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + description: Template for Kafka external bootstrap `Route`. 
+ perPodRoute: type: object properties: - allowPrivilegeEscalation: - type: boolean - capabilities: + metadata: type: object properties: - add: - type: array - items: - type: string - drop: - type: array - items: - type: string - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - type: integer - runAsNonRoot: - type: boolean - runAsUser: - type: integer - seLinuxOptions: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + description: >- + Template for Kafka per-pod `Routes` used for access + from outside of OpenShift. + externalBootstrapIngress: + type: object + properties: + metadata: type: object properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - seccompProfile: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + description: Template for Kafka external bootstrap `Ingress`. 
+ perPodIngress: + type: object + properties: + metadata: type: object properties: - localhostProfile: - type: string - type: - type: string - windowsOptions: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + description: >- + Template for Kafka per-pod `Ingress` used for access + from outside of Kubernetes. + persistentVolumeClaim: + type: object + properties: + metadata: type: object properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - description: Security context for the container. - description: Template for the Kafka init container. - podDisruptionBudget: - type: object - properties: - metadata: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + description: Template for all Kafka `PersistentVolumeClaims`. 
+ podDisruptionBudget: type: object properties: - labels: - x-kubernetes-preserve-unknown-fields: true + metadata: type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. description: >- - Labels added to the resource template. Can be - applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object + Metadata to apply to the + `PodDisruptionBudgetTemplate` resource. + maxUnavailable: + type: integer + minimum: 0 description: >- - Annotations added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - description: >- - Metadata to apply to the - `PodDistruptionBugetTemplate` resource. - maxUnavailable: - type: integer - minimum: 0 - description: >- - Maximum number of unavailable pods to allow - automatic Pod eviction. A Pod eviction is allowed - when the `maxUnavailable` number of pods or fewer - are unavailable after the eviction. Setting this - value to 0 prevents all voluntary evictions, so the - pods must be evicted manually. Defaults to 1. - description: Template for Kafka Connect `PodDisruptionBudget`. - description: >- - Template for Kafka Connect and Kafka Connect S2I resources. - The template allows users to specify how the `Deployment`, - `Pods` and `Service` are generated. 
- externalConfiguration: - type: object - properties: - env: - type: array - items: - type: object - properties: - name: - type: string - description: >- - Name of the environment variable which will be - passed to the Kafka Connect pods. The name of the - environment variable cannot start with `KAFKA_` or - `STRIMZI_`. - valueFrom: - type: object - properties: - configMapKeyRef: + Maximum number of unavailable pods to allow + automatic Pod eviction. A Pod eviction is + allowed when the `maxUnavailable` number of pods + or fewer are unavailable after the eviction. + Setting this value to 0 prevents all voluntary + evictions, so the pods must be evicted manually. + Defaults to 1. + description: Template for Kafka `PodDisruptionBudget`. + kafkaContainer: + type: object + properties: + env: + type: array + items: type: object properties: - key: - type: string name: type: string - optional: - type: boolean - description: Reference to a key in a ConfigMap. - secretKeyRef: - type: object - properties: - key: - type: string - name: + description: The environment variable key. + value: type: string - optional: - type: boolean - description: Reference to a key in a Secret. - description: >- - Value of the environment variable which will be - passed to the Kafka Connect pods. It can be passed - either as a reference to Secret or ConfigMap - field. The field has to specify exactly one Secret - or ConfigMap. - required: - - name - - valueFrom - description: >- - Allows to pass data from Secret or ConfigMap to the - Kafka Connect pods as environment variables. - volumes: - type: array - items: - type: object - properties: - configMap: - type: object - properties: - defaultMode: - type: integer - items: - type: array - items: + description: The environment variable value. + description: >- + Environment variables which should be applied to + the container. 
+ securityContext: + type: object + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + type: object + properties: + add: + type: array + items: + type: string + drop: + type: array + items: + type: string + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: type: object properties: - key: + level: type: string - mode: - type: integer - path: + role: type: string - name: - type: string - optional: - type: boolean - description: >- - Reference to a key in a ConfigMap. Exactly one - Secret or ConfigMap has to be specified. - name: - type: string - description: >- - Name of the volume which will be added to the - Kafka Connect pods. - secret: - type: object - properties: - defaultMode: - type: integer - items: - type: array - items: + type: + type: string + user: + type: string + seccompProfile: type: object properties: - key: + localhostProfile: type: string - mode: - type: integer - path: + type: type: string - optional: - type: boolean - secretName: - type: string - description: >- - Reference to a key in a Secret. Exactly one Secret - or ConfigMap has to be specified. - required: - - name - description: >- - Allows to pass data from Secret or ConfigMap to the - Kafka Connect pods as volumes. - description: >- - Pass data from Secrets or ConfigMaps to the Kafka Connect - pods and use them to configure connectors. - build: - type: object - properties: - output: - type: object - properties: - additionalKanikoOptions: - type: array - items: - type: string - description: >- - Configures additional options which will be passed - to the Kaniko executor when building the new Connect - image. 
Allowed options are: --customPlatform, - --insecure, --insecure-pull, --insecure-registry, - --log-format, --log-timestamp, --registry-mirror, - --reproducible, --single-snapshot, - --skip-tls-verify, --skip-tls-verify-pull, - --skip-tls-verify-registry, --verbosity, - --snapshotMode, --use-new-run. These options will be - used only on Kubernetes where the Kaniko executor is - used. They will be ignored on OpenShift. The options - are described in the - link:https://github.com/GoogleContainerTools/kaniko[Kaniko - GitHub repository^]. Changing this field does not - trigger new build of the Kafka Connect image. - image: - type: string - description: The name of the image which will be built. Required. - pushSecret: - type: string - description: >- - Container Registry Secret with the credentials for - pushing the newly built image. - type: - type: string - enum: - - docker - - imagestream - description: >- - Output type. Must be either `docker` for pushing the - newly build image to Docker compatible registry or - `imagestream` for pushing the image to OpenShift - ImageStream. Required. - required: - - image - - type - description: >- - Configures where should the newly built image be stored. - Required. - resources: - type: object - properties: - limits: - x-kubernetes-preserve-unknown-fields: true - type: object - requests: - x-kubernetes-preserve-unknown-fields: true + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Security context for the container. + description: Template for the Kafka broker container. + initContainer: type: object - description: CPU and memory resources to reserve for the build. - plugins: - type: array - items: - type: object - properties: - name: - type: string - pattern: '^[a-z0-9][-_a-z0-9]*[a-z0-9]$' - description: >- - The unique name of the connector plugin. 
Will be - used to generate the path where the connector - artifacts will be stored. The name has to be - unique within the KafkaConnect resource. The name - has to follow the following pattern: - `^[a-z][-_a-z0-9]*[a-z]$`. Required. - artifacts: - type: array - items: + properties: + env: + type: array + items: + type: object + properties: + name: + type: string + description: The environment variable key. + value: + type: string + description: The environment variable value. + description: >- + Environment variables which should be applied to + the container. + securityContext: type: object properties: - sha512sum: - type: string - description: >- - SHA512 checksum of the artifact. Optional. - If specified, the checksum will be verified - while building the new container. If not - specified, the downloaded artifact will not - be verified. - type: - type: string - enum: - - jar - - tgz - - zip - description: >- - Artifact type. Currently, the supported - artifact types are `tgz`, `jar`, and `zip`. - url: + allowPrivilegeEscalation: + type: boolean + capabilities: + type: object + properties: + add: + type: array + items: + type: string + drop: + type: array + items: + type: string + privileged: + type: boolean + procMount: type: string - pattern: >- - ^(https?|ftp)://[-a-zA-Z0-9+&@#/%?=~_|!:,.;]*[-a-zA-Z0-9+&@#/%=~_|] - description: >- - URL of the artifact which will be - downloaded. Strimzi does not do any security - scanning of the downloaded artifacts. For - security reasons, you should first verify - the artifacts manually and configure the - checksum verification to make sure the same - artifact is used in the automated build. - Required. - required: - - type - - url - description: >- - List of artifacts which belong to this connector - plugin. Required. - required: - - name - - artifacts - description: >- - List of connector plugins which should be added to the - Kafka Connect. Required. 
- required: - - output - - plugins - description: >- - Configures how the Connect container image should be built. - Optional. - clientRackInitImage: - type: string - description: >- - The image of the init container used for initializing the - `client.rack`. - insecureSourceRepository: - type: boolean - description: >- - When true this configures the source repository with the - 'Local' reference policy and an import policy that accepts - insecure source tags. - metricsConfig: - type: object - properties: - type: - type: string - enum: - - jmxPrometheusExporter - description: >- - Metrics type. Only 'jmxPrometheusExporter' supported - currently. - valueFrom: - type: object - properties: - configMapKeyRef: + readOnlyRootFilesystem: + type: boolean + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Security context for the container. + description: Template for the Kafka init container. + clusterCaCert: type: object properties: - key: - type: string - name: - type: string - optional: - type: boolean + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. 
+ Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. description: >- - Reference to the key in the ConfigMap containing the - configuration. - description: >- - ConfigMap entry where the Prometheus JMX Exporter - configuration is stored. For details of the structure of - this configuration, see the {JMXExporter}. - required: - - type - - valueFrom - description: Metrics configuration. - rack: - type: object - properties: - topologyKey: - type: string - example: topology.kubernetes.io/zone - description: >- - A key that matches labels assigned to the Kubernetes - cluster nodes. The value of the label is used to set the - broker's `broker.rack` config and `client.rack` in Kafka - Connect. - required: - - topologyKey - description: >- - Configuration of the node label which will be used as the - client.rack consumer configuration. - required: - - bootstrapServers - description: >- - The specification of the Kafka Connect Source-to-Image (S2I) - cluster. - status: - type: object - properties: - conditions: - type: array - items: - type: object - properties: - type: - type: string - description: >- - The unique identifier of a condition, used to - distinguish between other conditions in the resource. - status: - type: string - description: >- - The status of the condition, either True, False or - Unknown. - lastTransitionTime: - type: string - description: >- - Last time the condition of a type changed from one - status to another. The required format is - 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. - reason: - type: string - description: >- - The reason for the condition's last transition (a - single word in CamelCase). - message: - type: string - description: >- - Human-readable message indicating details about the - condition's last transition. - description: List of status conditions. 
- observedGeneration: - type: integer - description: >- - The generation of the CRD that was last reconciled by the - operator. - url: - type: string - description: >- - The URL of the REST API endpoint for managing and monitoring - Kafka Connect connectors. - connectorPlugins: - type: array - items: - type: object - properties: - type: - type: string - description: >- - The type of the connector plugin. The available types - are `sink` and `source`. - version: - type: string - description: The version of the connector plugin. - class: - type: string - description: The class of the connector plugin. - description: >- - The list of connector plugins available in this Kafka - Connect deployment. - buildConfigName: - type: string - description: The name of the build configuration. - labelSelector: - type: string - description: Label selector for pods providing this resource. - replicas: - type: integer - description: >- - The current number of pods being used to provide this - resource. - description: The status of the Kafka Connect Source-to-Image (S2I) cluster. 
- ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: kafkaconnects.kafka.strimzi.io - labels: - app: strimzi - strimzi.io/crd-install: 'true' -spec: - group: kafka.strimzi.io - names: - kind: KafkaConnect - listKind: KafkaConnectList - singular: kafkaconnect - plural: kafkaconnects - shortNames: - - kc - categories: - - strimzi - scope: Namespaced - conversion: - strategy: None - versions: - - name: v1beta2 - served: true - storage: true - subresources: - status: {} - scale: - specReplicasPath: .spec.replicas - statusReplicasPath: .status.replicas - labelSelectorPath: .status.labelSelector - additionalPrinterColumns: - - name: Desired replicas - description: The desired number of Kafka Connect replicas - jsonPath: .spec.replicas - type: integer - - name: Ready - description: The state of the custom resource - jsonPath: '.status.conditions[?(@.type=="Ready")].status' - type: string - schema: - openAPIV3Schema: - type: object - properties: - spec: - type: object - properties: - version: - type: string - description: >- - The Kafka Connect version. Defaults to - {DefaultKafkaVersion}. Consult the user documentation to - understand the process required to upgrade or downgrade the - version. - replicas: - type: integer - description: The number of pods in the Kafka Connect group. - image: - type: string - description: The docker image for the pods. - bootstrapServers: - type: string - description: >- - Bootstrap servers to connect to. This should be given as a - comma separated list of __:‍__ pairs. - tls: - type: object - properties: - trustedCertificates: - type: array - items: - type: object - properties: - certificate: - type: string - description: The name of the file certificate in the Secret. - secretName: - type: string - description: The name of the Secret containing the certificate. - required: - - certificate - - secretName - description: Trusted certificates for TLS connection. - description: TLS configuration. 
- authentication: + Template for Secret with Kafka Cluster certificate + public key. + serviceAccount: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + description: Template for the Kafka service account. + jmxSecret: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + description: >- + Template for Secret of the Kafka Cluster JMX + authentication. + clusterRoleBinding: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. 
+ Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + description: Template for the Kafka ClusterRoleBinding. + podSet: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + description: Template for Kafka `StrimziPodSet` resource. + description: >- + Template for Kafka cluster resources. The template + allows users to specify how the `StatefulSet`, `Pods`, + and `Services` are generated. + required: + - replicas + - listeners + - storage + description: Configuration of the Kafka cluster. + zookeeper: type: object properties: - accessToken: + replicas: + type: integer + minimum: 1 + description: The number of pods in the cluster. + image: + type: string + description: The docker image for the pods. + storage: type: object properties: - key: + class: type: string description: >- - The key under which the secret value is stored in - the Kubernetes Secret. - secretName: - type: string + The storage class to use for dynamic volume + allocation. + deleteClaim: + type: boolean description: >- - The name of the Kubernetes Secret containing the - secret value. - required: - - key - - secretName - description: >- - Link to Kubernetes Secret containing the access token - which was obtained from the authorization server. - accessTokenIsJwt: - type: boolean - description: >- - Configure whether access token should be treated as JWT. 
- This should be set to `false` if the authorization - server returns opaque tokens. Defaults to `true`. - certificateAndKey: - type: object - properties: - certificate: + Specifies if the persistent volume claim has to be + deleted when the cluster is un-deployed. + id: + type: integer + minimum: 0 + description: >- + Storage identification number. It is mandatory only + for storage volumes defined in a storage of type + 'jbod'. + overrides: + type: array + items: + type: object + properties: + class: + type: string + description: >- + The storage class to use for dynamic volume + allocation for this broker. + broker: + type: integer + description: Id of the kafka broker (broker identifier). + description: >- + Overrides for individual brokers. The `overrides` + field allows to specify a different configuration + for different brokers. + selector: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Specifies a specific persistent volume to use. It + contains key:value pairs representing labels for + selecting such a volume. + size: type: string - description: The name of the file certificate in the Secret. - key: + description: >- + When type=persistent-claim, defines the size of the + persistent volume claim (i.e 1Gi). Mandatory when + type=persistent-claim. + sizeLimit: type: string - description: The name of the private key in the Secret. - secretName: + pattern: '^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$' + description: >- + When type=ephemeral, defines the total amount of + local storage required for this EmptyDir volume (for + example 1Gi). + type: type: string - description: The name of the Secret containing the certificate. + enum: + - ephemeral + - persistent-claim + description: >- + Storage type, must be either 'ephemeral' or + 'persistent-claim'. required: - - certificate - - key - - secretName - description: >- - Reference to the `Secret` which holds the certificate - and private key pair. 
- clientId: - type: string + - type + description: Storage configuration (disk). Cannot be updated. + config: + x-kubernetes-preserve-unknown-fields: true + type: object description: >- - OAuth Client ID which the Kafka client can use to - authenticate against the OAuth server and use the token - endpoint URI. - clientSecret: + The ZooKeeper broker config. Properties with the + following prefixes cannot be set: server., dataDir, + dataLogDir, clientPort, authProvider, quorum.auth, + requireClientAuthScheme, snapshot.trust.empty, + standaloneEnabled, reconfigEnabled, + 4lw.commands.whitelist, secureClientPort, ssl., + serverCnxnFactory, sslQuorum (with the exception of: + ssl.protocol, ssl.quorum.protocol, ssl.enabledProtocols, + ssl.quorum.enabledProtocols, ssl.ciphersuites, + ssl.quorum.ciphersuites, ssl.hostnameVerification, + ssl.quorum.hostnameVerification). + livenessProbe: type: object properties: - key: - type: string + failureThreshold: + type: integer + minimum: 1 description: >- - The key under which the secret value is stored in - the Kubernetes Secret. - secretName: - type: string + Minimum consecutive failures for the probe to be + considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 description: >- - The name of the Kubernetes Secret containing the - secret value. - required: - - key - - secretName - description: >- - Link to Kubernetes Secret containing the OAuth client - secret which the Kafka client can use to authenticate - against the OAuth server and use the token endpoint URI. - disableTlsHostnameVerification: - type: boolean - description: >- - Enable or disable TLS hostname verification. Default - value is `false`. - maxTokenExpirySeconds: - type: integer - description: >- - Set or limit time-to-live of the access tokens to the - specified number of seconds. This should be set if the - authorization server returns opaque tokens. 
- passwordSecret: + The initial delay before first the health is first + checked. Default to 15 seconds. Minimum value is 0. + periodSeconds: + type: integer + minimum: 1 + description: >- + How often (in seconds) to perform the probe. Default + to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive successes for the probe to be + considered successful after having failed. Defaults + to 1. Must be 1 for liveness. Minimum value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: >- + The timeout for each attempted health check. Default + to 5 seconds. Minimum value is 1. + description: Pod liveness checking. + readinessProbe: type: object properties: - password: - type: string + failureThreshold: + type: integer + minimum: 1 description: >- - The name of the key in the Secret under which the - password is stored. - secretName: - type: string - description: The name of the Secret containing the password. - required: - - password - - secretName - description: Reference to the `Secret` which holds the password. - refreshToken: + Minimum consecutive failures for the probe to be + considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: >- + The initial delay before first the health is first + checked. Default to 15 seconds. Minimum value is 0. + periodSeconds: + type: integer + minimum: 1 + description: >- + How often (in seconds) to perform the probe. Default + to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive successes for the probe to be + considered successful after having failed. Defaults + to 1. Must be 1 for liveness. Minimum value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: >- + The timeout for each attempted health check. Default + to 5 seconds. Minimum value is 1. 
+ description: Pod readiness checking. + jvmOptions: type: object properties: - key: + '-XX': + x-kubernetes-preserve-unknown-fields: true + type: object + description: A map of -XX options to the JVM. + '-Xms': type: string - description: >- - The key under which the secret value is stored in - the Kubernetes Secret. - secretName: + pattern: '^[0-9]+[mMgG]?$' + description: '-Xms option to to the JVM.' + '-Xmx': type: string + pattern: '^[0-9]+[mMgG]?$' + description: '-Xmx option to to the JVM.' + gcLoggingEnabled: + type: boolean description: >- - The name of the Kubernetes Secret containing the - secret value. - required: - - key - - secretName - description: >- - Link to Kubernetes Secret containing the refresh token - which can be used to obtain access token from the - authorization server. - scope: - type: string - description: >- - OAuth scope to use when authenticating against the - authorization server. Some authorization servers require - this to be set. The possible values depend on how - authorization server is configured. By default `scope` - is not specified when doing the token endpoint request. - tlsTrustedCertificates: - type: array - items: - type: object - properties: - certificate: - type: string - description: The name of the file certificate in the Secret. - secretName: - type: string - description: The name of the Secret containing the certificate. - required: - - certificate - - secretName - description: >- - Trusted certificates for TLS connection to the OAuth - server. - tokenEndpointUri: - type: string - description: Authorization server token endpoint URI. - type: - type: string - enum: - - tls - - scram-sha-512 - - plain - - oauth - description: >- - Authentication type. Currently the only supported types - are `tls`, `scram-sha-512`, and `plain`. `scram-sha-512` - type uses SASL SCRAM-SHA-512 Authentication. `plain` - type uses SASL PLAIN Authentication. `oauth` type uses - SASL OAUTHBEARER Authentication. 
The `tls` type uses TLS - Client Authentication. The `tls` type is supported only - over TLS connections. - username: - type: string - description: Username used for the authentication. - required: - - type - description: Authentication configuration for Kafka Connect. - config: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - The Kafka Connect configuration. Properties with the - following prefixes cannot be set: ssl., sasl., security., - listeners, plugin.path, rest., bootstrap.servers, - consumer.interceptor.classes, producer.interceptor.classes - (with the exception of: - ssl.endpoint.identification.algorithm, ssl.cipher.suites, - ssl.protocol, ssl.enabled.protocols). - resources: - type: object - properties: - limits: - x-kubernetes-preserve-unknown-fields: true - type: object - requests: - x-kubernetes-preserve-unknown-fields: true + Specifies whether the Garbage Collection logging is + enabled. The default is false. + javaSystemProperties: + type: array + items: + type: object + properties: + name: + type: string + description: The system property name. + value: + type: string + description: The system property value. + description: >- + A map of additional system properties which will be + passed using the `-D` option to the JVM. + description: JVM Options for pods. + jmxOptions: type: object - description: >- - The maximum limits for CPU and memory resources and the - requested initial resources. - livenessProbe: - type: object - properties: - failureThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive failures for the probe to be - considered failed after having succeeded. Defaults to 3. - Minimum value is 1. - initialDelaySeconds: - type: integer - minimum: 0 - description: >- - The initial delay before first the health is first - checked. Default to 15 seconds. Minimum value is 0. - periodSeconds: - type: integer - minimum: 1 - description: >- - How often (in seconds) to perform the probe. 
Default to - 10 seconds. Minimum value is 1. - successThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive successes for the probe to be - considered successful after having failed. Defaults to - 1. Must be 1 for liveness. Minimum value is 1. - timeoutSeconds: - type: integer - minimum: 1 - description: >- - The timeout for each attempted health check. Default to - 5 seconds. Minimum value is 1. - description: Pod liveness checking. - readinessProbe: - type: object - properties: - failureThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive failures for the probe to be - considered failed after having succeeded. Defaults to 3. - Minimum value is 1. - initialDelaySeconds: - type: integer - minimum: 0 - description: >- - The initial delay before first the health is first - checked. Default to 15 seconds. Minimum value is 0. - periodSeconds: - type: integer - minimum: 1 - description: >- - How often (in seconds) to perform the probe. Default to - 10 seconds. Minimum value is 1. - successThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive successes for the probe to be - considered successful after having failed. Defaults to - 1. Must be 1 for liveness. Minimum value is 1. - timeoutSeconds: - type: integer - minimum: 1 - description: >- - The timeout for each attempted health check. Default to - 5 seconds. Minimum value is 1. - description: Pod readiness checking. - jvmOptions: - type: object - properties: - '-XX': - x-kubernetes-preserve-unknown-fields: true + properties: + authentication: + type: object + properties: + type: + type: string + enum: + - password + description: >- + Authentication type. Currently the only + supported types are `password`.`password` type + creates a username and protected port with no + TLS. + required: + - type + description: >- + Authentication configuration for connecting to the + JMX port. + description: JMX Options for Zookeeper nodes. 
+ resources: type: object - description: A map of -XX options to the JVM. - '-Xms': - type: string - pattern: '[0-9]+[mMgG]?' - description: '-Xms option to to the JVM.' - '-Xmx': - type: string - pattern: '[0-9]+[mMgG]?' - description: '-Xmx option to to the JVM.' - gcLoggingEnabled: - type: boolean - description: >- - Specifies whether the Garbage Collection logging is - enabled. The default is false. - javaSystemProperties: - type: array - items: - type: object - properties: - name: - type: string - description: The system property name. - value: - type: string - description: The system property value. - description: >- - A map of additional system properties which will be - passed using the `-D` option to the JVM. - description: JVM Options for pods. - jmxOptions: - type: object - properties: - authentication: + properties: + limits: + x-kubernetes-preserve-unknown-fields: true + type: object + requests: + x-kubernetes-preserve-unknown-fields: true + type: object + description: CPU and memory resources to reserve. + metricsConfig: type: object properties: type: type: string enum: - - password + - jmxPrometheusExporter description: >- - Authentication type. Currently the only supported - types are `password`.`password` type creates a - username and protected port with no TLS. + Metrics type. Only 'jmxPrometheusExporter' supported + currently. + valueFrom: + type: object + properties: + configMapKeyRef: + type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean + description: >- + Reference to the key in the ConfigMap containing + the configuration. + description: >- + ConfigMap entry where the Prometheus JMX Exporter + configuration is stored. For details of the + structure of this configuration, see the + {JMXExporter}. required: - type - description: >- - Authentication configuration for connecting to the JMX - port. - description: JMX Options. 
- logging: - type: object - properties: - loggers: - x-kubernetes-preserve-unknown-fields: true + - valueFrom + description: Metrics configuration. + logging: type: object - description: A Map from logger name to logger level. - type: - type: string - enum: - - inline - - external - description: 'Logging type, must be either ''inline'' or ''external''.' - valueFrom: + properties: + loggers: + x-kubernetes-preserve-unknown-fields: true + type: object + description: A Map from logger name to logger level. + type: + type: string + enum: + - inline + - external + description: 'Logging type, must be either ''inline'' or ''external''.' + valueFrom: + type: object + properties: + configMapKeyRef: + type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean + description: >- + Reference to the key in the ConfigMap containing + the configuration. + description: >- + `ConfigMap` entry where the logging configuration is + stored. + required: + - type + description: Logging configuration for ZooKeeper. + template: type: object properties: - configMapKeyRef: + statefulset: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + podManagementPolicy: + type: string + enum: + - OrderedReady + - Parallel + description: >- + PodManagementPolicy which will be used for this + StatefulSet. Valid values are `Parallel` and + `OrderedReady`. Defaults to `Parallel`. 
+ description: Template for ZooKeeper `StatefulSet`. + pod: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + imagePullSecrets: + type: array + items: + type: object + properties: + name: + type: string + description: >- + List of references to secrets in the same + namespace to use for pulling any of the images + used by this Pod. When the + `STRIMZI_IMAGE_PULL_SECRETS` environment + variable in Cluster Operator and the + `imagePullSecrets` option are specified, only + the `imagePullSecrets` variable is used and the + `STRIMZI_IMAGE_PULL_SECRETS` variable is + ignored. 
+ securityContext: + type: object + properties: + fsGroup: + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + supplementalGroups: + type: array + items: + type: integer + sysctls: + type: array + items: + type: object + properties: + name: + type: string + value: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: >- + Configures pod-level security attributes and + common container settings. + terminationGracePeriodSeconds: + type: integer + minimum: 0 + description: >- + The grace period is the duration in seconds + after the processes running in the pod are sent + a termination signal, and the time when the + processes are forcibly halted with a kill + signal. Set this value to longer than the + expected cleanup time for your process. Value + must be a non-negative integer. A zero value + indicates delete immediately. You might need to + increase the grace period for very large Kafka + clusters, so that the Kafka brokers have enough + time to transfer their work to another broker + before they are terminated. Defaults to 30 + seconds. 
+ affinity: + type: object + properties: + nodeAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + preference: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: object + properties: + nodeSelectorTerms: + type: array + items: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + podAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + 
requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + podAntiAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + 
x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + description: The pod's affinity rules. + tolerations: + type: array + items: + type: object + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + type: integer + value: + type: string + description: The pod's tolerations. + priorityClassName: + type: string + description: >- + The name of the priority class used to assign + priority to the pods. For more information about + priority classes, see {K8sPriorityClass}. + schedulerName: + type: string + description: >- + The name of the scheduler used to dispatch this + `Pod`. If not specified, the default scheduler + will be used. + hostAliases: + type: array + items: + type: object + properties: + hostnames: + type: array + items: + type: string + ip: + type: string + description: >- + The pod's HostAliases. HostAliases is an + optional list of hosts and IPs that will be + injected into the Pod's hosts file if specified. + tmpDirSizeLimit: + type: string + pattern: '^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$' + description: >- + Defines the total amount (for example `1Gi`) of + local storage required for temporary EmptyDir + volume (`/tmp`). Default value is `5Mi`. + enableServiceLinks: + type: boolean + description: >- + Indicates whether information about services + should be injected into Pod's environment + variables. 
+ topologySpreadConstraints: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + matchLabelKeys: + type: array + items: + type: string + maxSkew: + type: integer + minDomains: + type: integer + nodeAffinityPolicy: + type: string + nodeTaintsPolicy: + type: string + topologyKey: + type: string + whenUnsatisfiable: + type: string + description: The pod's topology spread constraints. + description: Template for ZooKeeper `Pods`. + clientService: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + ipFamilyPolicy: + type: string + enum: + - SingleStack + - PreferDualStack + - RequireDualStack + description: >- + Specifies the IP Family Policy used by the + service. Available options are `SingleStack`, + `PreferDualStack` and `RequireDualStack`. + `SingleStack` is for a single IP family. + `PreferDualStack` is for two IP families on + dual-stack configured clusters or a single IP + family on single-stack clusters. + `RequireDualStack` fails unless there are two IP + families on dual-stack configured clusters. If + unspecified, Kubernetes will choose the default + value based on the service type. Available on + Kubernetes 1.20 and newer. 
+ ipFamilies: + type: array + items: + type: string + enum: + - IPv4 + - IPv6 + description: >- + Specifies the IP Families used by the service. + Available options are `IPv4` and `IPv6. If + unspecified, Kubernetes will choose the default + value based on the `ipFamilyPolicy` setting. + Available on Kubernetes 1.20 and newer. + description: Template for ZooKeeper client `Service`. + nodesService: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + ipFamilyPolicy: + type: string + enum: + - SingleStack + - PreferDualStack + - RequireDualStack + description: >- + Specifies the IP Family Policy used by the + service. Available options are `SingleStack`, + `PreferDualStack` and `RequireDualStack`. + `SingleStack` is for a single IP family. + `PreferDualStack` is for two IP families on + dual-stack configured clusters or a single IP + family on single-stack clusters. + `RequireDualStack` fails unless there are two IP + families on dual-stack configured clusters. If + unspecified, Kubernetes will choose the default + value based on the service type. Available on + Kubernetes 1.20 and newer. + ipFamilies: + type: array + items: + type: string + enum: + - IPv4 + - IPv6 + description: >- + Specifies the IP Families used by the service. + Available options are `IPv4` and `IPv6. If + unspecified, Kubernetes will choose the default + value based on the `ipFamilyPolicy` setting. + Available on Kubernetes 1.20 and newer. 
+ description: Template for ZooKeeper nodes `Service`. + persistentVolumeClaim: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + description: Template for all ZooKeeper `PersistentVolumeClaims`. + podDisruptionBudget: type: object properties: - key: - type: string - name: - type: string - optional: - type: boolean + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: >- + Metadata to apply to the + `PodDisruptionBudgetTemplate` resource. + maxUnavailable: + type: integer + minimum: 0 + description: >- + Maximum number of unavailable pods to allow + automatic Pod eviction. A Pod eviction is + allowed when the `maxUnavailable` number of pods + or fewer are unavailable after the eviction. + Setting this value to 0 prevents all voluntary + evictions, so the pods must be evicted manually. + Defaults to 1. + description: Template for ZooKeeper `PodDisruptionBudget`. 
+ zookeeperContainer: + type: object + properties: + env: + type: array + items: + type: object + properties: + name: + type: string + description: The environment variable key. + value: + type: string + description: The environment variable value. + description: >- + Environment variables which should be applied to + the container. + securityContext: + type: object + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + type: object + properties: + add: + type: array + items: + type: string + drop: + type: array + items: + type: string + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Security context for the container. + description: Template for the ZooKeeper container. + serviceAccount: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + description: Template for the ZooKeeper service account. 
+ jmxSecret: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. description: >- - Reference to the key in the ConfigMap containing the - configuration. - description: >- - `ConfigMap` entry where the logging configuration is - stored. - required: - - type - description: Logging configuration for Kafka Connect. - tracing: - type: object - properties: - type: - type: string - enum: - - jaeger + Template for Secret of the Zookeeper Cluster JMX + authentication. + podSet: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + description: Template for ZooKeeper `StrimziPodSet` resource. description: >- - Type of the tracing used. Currently the only supported - type is `jaeger` for Jaeger tracing. + Template for ZooKeeper cluster resources. The template + allows users to specify how the `StatefulSet`, `Pods`, + and `Services` are generated. 
required: - - type - description: The configuration of tracing in Kafka Connect. - template: + - replicas + - storage + description: Configuration of the ZooKeeper cluster. + entityOperator: type: object properties: - deployment: + topicOperator: type: object properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can be - applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - description: Metadata applied to the resource. - deploymentStrategy: + watchedNamespace: type: string - enum: - - RollingUpdate - - Recreate - description: >- - DeploymentStrategy which will be used for this - Deployment. Valid values are `RollingUpdate` and - `Recreate`. Defaults to `RollingUpdate`. - description: Template for Kafka Connect `Deployment`. - pod: - type: object - properties: - metadata: + description: The namespace the Topic Operator should watch. + image: + type: string + description: The image to use for the Topic Operator. + reconciliationIntervalSeconds: + type: integer + minimum: 0 + description: Interval between periodic reconciliations. + zookeeperSessionTimeoutSeconds: + type: integer + minimum: 0 + description: Timeout for the ZooKeeper session. + startupProbe: type: object properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object + failureThreshold: + type: integer + minimum: 1 description: >- - Labels added to the resource template. Can be - applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. 
- annotations: - x-kubernetes-preserve-unknown-fields: true - type: object + Minimum consecutive failures for the probe to be + considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 description: >- - Annotations added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - description: Metadata applied to the resource. - imagePullSecrets: - type: array - items: - type: object - properties: - name: - type: string - description: >- - List of references to secrets in the same namespace - to use for pulling any of the images used by this - Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` - environment variable in Cluster Operator and the - `imagePullSecrets` option are specified, only the - `imagePullSecrets` variable is used and the - `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored. - securityContext: + The initial delay before first the health is + first checked. Default to 15 seconds. Minimum + value is 0. + periodSeconds: + type: integer + minimum: 1 + description: >- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive successes for the probe to + be considered successful after having failed. + Defaults to 1. Must be 1 for liveness. Minimum + value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: >- + The timeout for each attempted health check. + Default to 5 seconds. Minimum value is 1. + description: Pod startup checking. + livenessProbe: type: object properties: - fsGroup: + failureThreshold: type: integer - fsGroupChangePolicy: - type: string - runAsGroup: + minimum: 1 + description: >- + Minimum consecutive failures for the probe to be + considered failed after having succeeded. + Defaults to 3. Minimum value is 1. 
+ initialDelaySeconds: type: integer - runAsNonRoot: - type: boolean - runAsUser: + minimum: 0 + description: >- + The initial delay before first the health is + first checked. Default to 15 seconds. Minimum + value is 0. + periodSeconds: type: integer - seLinuxOptions: - type: object - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - seccompProfile: - type: object - properties: - localhostProfile: - type: string - type: - type: string - supplementalGroups: - type: array - items: - type: integer - sysctls: - type: array - items: - type: object - properties: - name: - type: string - value: - type: string - windowsOptions: - type: object - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - description: >- - Configures pod-level security attributes and common - container settings. - terminationGracePeriodSeconds: - type: integer - minimum: 0 - description: >- - The grace period is the duration in seconds after - the processes running in the pod are sent a - termination signal, and the time when the processes - are forcibly halted with a kill signal. Set this - value to longer than the expected cleanup time for - your process. Value must be a non-negative integer. - A zero value indicates delete immediately. You might - need to increase the grace period for very large - Kafka clusters, so that the Kafka brokers have - enough time to transfer their work to another broker - before they are terminated. Defaults to 30 seconds. - affinity: + minimum: 1 + description: >- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive successes for the probe to + be considered successful after having failed. + Defaults to 1. Must be 1 for liveness. Minimum + value is 1. 
+ timeoutSeconds: + type: integer + minimum: 1 + description: >- + The timeout for each attempted health check. + Default to 5 seconds. Minimum value is 1. + description: Pod liveness checking. + readinessProbe: type: object properties: - nodeAffinity: - type: object - properties: - preferredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - preference: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchFields: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - weight: - type: integer - requiredDuringSchedulingIgnoredDuringExecution: - type: object - properties: - nodeSelectorTerms: - type: array - items: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchFields: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - podAffinity: + failureThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive failures for the probe to be + considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: >- + The initial delay before first the health is + first checked. Default to 15 seconds. Minimum + value is 0. + periodSeconds: + type: integer + minimum: 1 + description: >- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive successes for the probe to + be considered successful after having failed. 
+ Defaults to 1. Must be 1 for liveness. Minimum + value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: >- + The timeout for each attempted health check. + Default to 5 seconds. Minimum value is 1. + description: Pod readiness checking. + resources: + type: object + properties: + limits: + x-kubernetes-preserve-unknown-fields: true type: object - properties: - preferredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - podAffinityTerm: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - weight: - type: integer - requiredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - podAntiAffinity: + requests: + x-kubernetes-preserve-unknown-fields: true + type: object + description: CPU and memory resources to reserve. + topicMetadataMaxAttempts: + type: integer + minimum: 0 + description: The number of attempts at getting topic metadata. + logging: + type: object + properties: + loggers: + x-kubernetes-preserve-unknown-fields: true + type: object + description: A Map from logger name to logger level. + type: + type: string + enum: + - inline + - external + description: >- + Logging type, must be either 'inline' or + 'external'. 
+ valueFrom: type: object properties: - preferredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - podAffinityTerm: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - weight: - type: integer - requiredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - description: The pod's affinity rules. - tolerations: - type: array - items: - type: object - properties: - effect: - type: string - key: - type: string - operator: - type: string - tolerationSeconds: - type: integer - value: - type: string - description: The pod's tolerations. - priorityClassName: - type: string - description: >- - The name of the priority class used to assign - priority to the pods. For more information about - priority classes, see {K8sPriorityClass}. - schedulerName: - type: string - description: >- - The name of the scheduler used to dispatch this - `Pod`. If not specified, the default scheduler will - be used. - hostAliases: - type: array - items: - type: object - properties: - hostnames: - type: array - items: - type: string - ip: - type: string - description: >- - The pod's HostAliases. 
HostAliases is an optional - list of hosts and IPs that will be injected into the - Pod's hosts file if specified. - enableServiceLinks: - type: boolean - description: >- - Indicates whether information about services should - be injected into Pod's environment variables. - topologySpreadConstraints: - type: array - items: - type: object - properties: - labelSelector: + configMapKeyRef: + type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean + description: >- + Reference to the key in the ConfigMap + containing the configuration. + description: >- + `ConfigMap` entry where the logging + configuration is stored. + required: + - type + description: Logging configuration. + jvmOptions: + type: object + properties: + '-XX': + x-kubernetes-preserve-unknown-fields: true + type: object + description: A map of -XX options to the JVM. + '-Xms': + type: string + pattern: '^[0-9]+[mMgG]?$' + description: '-Xms option to to the JVM.' + '-Xmx': + type: string + pattern: '^[0-9]+[mMgG]?$' + description: '-Xmx option to to the JVM.' + gcLoggingEnabled: + type: boolean + description: >- + Specifies whether the Garbage Collection logging + is enabled. The default is false. + javaSystemProperties: + type: array + items: type: object properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - maxSkew: - type: integer - topologyKey: - type: string - whenUnsatisfiable: - type: string - description: The pod's topology spread constraints. - description: Template for Kafka Connect `Pods`. - apiService: + name: + type: string + description: The system property name. + value: + type: string + description: The system property value. + description: >- + A map of additional system properties which will + be passed using the `-D` option to the JVM. 
+ description: JVM Options for pods. + description: Configuration of the Topic Operator. + userOperator: type: object properties: - metadata: + watchedNamespace: + type: string + description: The namespace the User Operator should watch. + image: + type: string + description: The image to use for the User Operator. + reconciliationIntervalSeconds: + type: integer + minimum: 0 + description: Interval between periodic reconciliations. + zookeeperSessionTimeoutSeconds: + type: integer + minimum: 0 + description: Timeout for the ZooKeeper session. + secretPrefix: + type: string + description: >- + The prefix that will be added to the KafkaUser name + to be used as the Secret name. + livenessProbe: type: object properties: - labels: + failureThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive failures for the probe to be + considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: >- + The initial delay before first the health is + first checked. Default to 15 seconds. Minimum + value is 0. + periodSeconds: + type: integer + minimum: 1 + description: >- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive successes for the probe to + be considered successful after having failed. + Defaults to 1. Must be 1 for liveness. Minimum + value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: >- + The timeout for each attempted health check. + Default to 5 seconds. Minimum value is 1. + description: Pod liveness checking. + readinessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive failures for the probe to be + considered failed after having succeeded. + Defaults to 3. Minimum value is 1. 
+ initialDelaySeconds: + type: integer + minimum: 0 + description: >- + The initial delay before first the health is + first checked. Default to 15 seconds. Minimum + value is 0. + periodSeconds: + type: integer + minimum: 1 + description: >- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive successes for the probe to + be considered successful after having failed. + Defaults to 1. Must be 1 for liveness. Minimum + value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: >- + The timeout for each attempted health check. + Default to 5 seconds. Minimum value is 1. + description: Pod readiness checking. + resources: + type: object + properties: + limits: + x-kubernetes-preserve-unknown-fields: true + type: object + requests: + x-kubernetes-preserve-unknown-fields: true + type: object + description: CPU and memory resources to reserve. + logging: + type: object + properties: + loggers: x-kubernetes-preserve-unknown-fields: true type: object + description: A Map from logger name to logger level. + type: + type: string + enum: + - inline + - external description: >- - Labels added to the resource template. Can be - applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: + Logging type, must be either 'inline' or + 'external'. + valueFrom: + type: object + properties: + configMapKeyRef: + type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean + description: >- + Reference to the key in the ConfigMap + containing the configuration. + description: >- + `ConfigMap` entry where the logging + configuration is stored. + required: + - type + description: Logging configuration. + jvmOptions: + type: object + properties: + '-XX': x-kubernetes-preserve-unknown-fields: true type: object + description: A map of -XX options to the JVM. 
+ '-Xms': + type: string + pattern: '^[0-9]+[mMgG]?$' + description: '-Xms option to to the JVM.' + '-Xmx': + type: string + pattern: '^[0-9]+[mMgG]?$' + description: '-Xmx option to to the JVM.' + gcLoggingEnabled: + type: boolean description: >- - Annotations added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - description: Metadata applied to the resource. - ipFamilyPolicy: - type: string - enum: - - SingleStack - - PreferDualStack - - RequireDualStack - description: >- - Specifies the IP Family Policy used by the service. - Available options are `SingleStack`, - `PreferDualStack` and `RequireDualStack`. - `SingleStack` is for a single IP family. - `PreferDualStack` is for two IP families on - dual-stack configured clusters or a single IP family - on single-stack clusters. `RequireDualStack` fails - unless there are two IP families on dual-stack - configured clusters. If unspecified, Kubernetes will - choose the default value based on the service type. - Available on Kubernetes 1.20 and newer. - ipFamilies: - type: array - items: - type: string - enum: - - IPv4 - - IPv6 - description: >- - Specifies the IP Families used by the service. - Available options are `IPv4` and `IPv6. If - unspecified, Kubernetes will choose the default - value based on the `ipFamilyPolicy` setting. - Available on Kubernetes 1.20 and newer. - description: Template for Kafka Connect API `Service`. - buildConfig: + Specifies whether the Garbage Collection logging + is enabled. The default is false. + javaSystemProperties: + type: array + items: + type: object + properties: + name: + type: string + description: The system property name. + value: + type: string + description: The system property value. + description: >- + A map of additional system properties which will + be passed using the `-D` option to the JVM. + description: JVM Options for pods. + description: Configuration of the User Operator. 
+ tlsSidecar: type: object properties: - metadata: + image: + type: string + description: The docker image for the container. + livenessProbe: type: object properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object + failureThreshold: + type: integer + minimum: 1 description: >- - Labels added to the resource template. Can be - applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object + Minimum consecutive failures for the probe to be + considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 description: >- - Annotations added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - description: Metadata applied to the resource. - description: >- - Template for the Kafka Connect BuildConfig used to build - new container images. The BuildConfig is used only on - OpenShift. - buildContainer: - type: object - properties: - env: - type: array - items: - type: object - properties: - name: - type: string - description: The environment variable key. - value: - type: string - description: The environment variable value. + The initial delay before first the health is + first checked. Default to 15 seconds. Minimum + value is 0. + periodSeconds: + type: integer + minimum: 1 + description: >- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive successes for the probe to + be considered successful after having failed. + Defaults to 1. Must be 1 for liveness. Minimum + value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: >- + The timeout for each attempted health check. + Default to 5 seconds. Minimum value is 1. 
+ description: Pod liveness checking. + logLevel: + type: string + enum: + - emerg + - alert + - crit + - err + - warning + - notice + - info + - debug description: >- - Environment variables which should be applied to the - container. - securityContext: + The log level for the TLS sidecar. Default value is + `notice`. + readinessProbe: type: object properties: - allowPrivilegeEscalation: - type: boolean - capabilities: - type: object - properties: - add: - type: array - items: - type: string - drop: - type: array - items: - type: string - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: + failureThreshold: type: integer - runAsNonRoot: - type: boolean - runAsUser: + minimum: 1 + description: >- + Minimum consecutive failures for the probe to be + considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + initialDelaySeconds: type: integer - seLinuxOptions: - type: object - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - seccompProfile: + minimum: 0 + description: >- + The initial delay before first the health is + first checked. Default to 15 seconds. Minimum + value is 0. + periodSeconds: + type: integer + minimum: 1 + description: >- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive successes for the probe to + be considered successful after having failed. + Defaults to 1. Must be 1 for liveness. Minimum + value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: >- + The timeout for each attempted health check. + Default to 5 seconds. Minimum value is 1. + description: Pod readiness checking. 
+ resources: + type: object + properties: + limits: + x-kubernetes-preserve-unknown-fields: true type: object - properties: - localhostProfile: - type: string - type: - type: string - windowsOptions: + requests: + x-kubernetes-preserve-unknown-fields: true type: object - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - description: Security context for the container. - description: >- - Template for the Kafka Connect Build container. The - build container is used only on Kubernetes. - buildPod: + description: CPU and memory resources to reserve. + description: TLS sidecar configuration. + template: type: object properties: - metadata: + deployment: type: object properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can be - applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true + metadata: type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + deploymentStrategy: + type: string + enum: + - RollingUpdate + - Recreate description: >- - Annotations added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - description: Metadata applied to the resource. 
- imagePullSecrets: - type: array - items: - type: object - properties: - name: - type: string - description: >- - List of references to secrets in the same namespace - to use for pulling any of the images used by this - Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` - environment variable in Cluster Operator and the - `imagePullSecrets` option are specified, only the - `imagePullSecrets` variable is used and the - `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored. - securityContext: + Pod replacement strategy for deployment + configuration changes. Valid values are + `RollingUpdate` and `Recreate`. Defaults to + `RollingUpdate`. + description: Template for Entity Operator `Deployment`. + pod: type: object properties: - fsGroup: - type: integer - fsGroupChangePolicy: - type: string - runAsGroup: - type: integer - runAsNonRoot: - type: boolean - runAsUser: - type: integer - seLinuxOptions: - type: object - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - seccompProfile: + metadata: type: object properties: - localhostProfile: - type: string - type: - type: string - supplementalGroups: - type: array - items: - type: integer - sysctls: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + imagePullSecrets: type: array items: type: object properties: name: type: string - value: - type: string - windowsOptions: + description: >- + List of references to secrets in the same + namespace to use for pulling any of the images + used by this Pod. 
When the + `STRIMZI_IMAGE_PULL_SECRETS` environment + variable in Cluster Operator and the + `imagePullSecrets` option are specified, only + the `imagePullSecrets` variable is used and the + `STRIMZI_IMAGE_PULL_SECRETS` variable is + ignored. + securityContext: type: object properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: + fsGroup: + type: integer + fsGroupChangePolicy: type: string - description: >- - Configures pod-level security attributes and common - container settings. - terminationGracePeriodSeconds: - type: integer - minimum: 0 - description: >- - The grace period is the duration in seconds after - the processes running in the pod are sent a - termination signal, and the time when the processes - are forcibly halted with a kill signal. Set this - value to longer than the expected cleanup time for - your process. Value must be a non-negative integer. - A zero value indicates delete immediately. You might - need to increase the grace period for very large - Kafka clusters, so that the Kafka brokers have - enough time to transfer their work to another broker - before they are terminated. Defaults to 30 seconds. 
- affinity: - type: object - properties: - nodeAffinity: - type: object - properties: - preferredDuringSchedulingIgnoredDuringExecution: + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + supplementalGroups: + type: array + items: + type: integer + sysctls: type: array items: type: object properties: - preference: + name: + type: string + value: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: >- + Configures pod-level security attributes and + common container settings. + terminationGracePeriodSeconds: + type: integer + minimum: 0 + description: >- + The grace period is the duration in seconds + after the processes running in the pod are sent + a termination signal, and the time when the + processes are forcibly halted with a kill + signal. Set this value to longer than the + expected cleanup time for your process. Value + must be a non-negative integer. A zero value + indicates delete immediately. You might need to + increase the grace period for very large Kafka + clusters, so that the Kafka brokers have enough + time to transfer their work to another broker + before they are terminated. Defaults to 30 + seconds. 
+ affinity: + type: object + properties: + nodeAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: type: object properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchFields: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - weight: - type: integer - requiredDuringSchedulingIgnoredDuringExecution: + preference: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: object + properties: + nodeSelectorTerms: + type: array + items: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + podAffinity: type: object properties: - nodeSelectorTerms: + preferredDuringSchedulingIgnoredDuringExecution: type: array items: type: object properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchFields: - type: array - items: - type: object - properties: - key: - type: string - operator: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + 
properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: type: string - values: - type: array - items: - type: string - podAffinity: - type: object - properties: - preferredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - podAffinityTerm: + topologyKey: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: type: object properties: labelSelector: @@ -15349,56 +13457,7 @@ spec: matchLabels: x-kubernetes-preserve-unknown-fields: true type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - weight: - type: integer - requiredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - podAntiAffinity: - type: object - properties: - preferredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - podAffinityTerm: - type: object - properties: - labelSelector: + namespaceSelector: type: object properties: matchExpressions: @@ -15423,1618 +13482,2953 @@ spec: type: string 
topologyKey: type: string - weight: - type: integer - requiredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - labelSelector: + podAntiAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: type: object properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true + podAffinityTerm: type: object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - description: The pod's affinity rules. - tolerations: - type: array - items: - type: object - properties: - effect: - type: string - key: - type: string - operator: - type: string - tolerationSeconds: - type: integer - value: - type: string - description: The pod's tolerations. - priorityClassName: - type: string - description: >- - The name of the priority class used to assign - priority to the pods. For more information about - priority classes, see {K8sPriorityClass}. - schedulerName: - type: string - description: >- - The name of the scheduler used to dispatch this - `Pod`. If not specified, the default scheduler will - be used. - hostAliases: - type: array - items: - type: object - properties: - hostnames: - type: array - items: - type: string - ip: - type: string - description: >- - The pod's HostAliases. HostAliases is an optional - list of hosts and IPs that will be injected into the - Pod's hosts file if specified. - enableServiceLinks: - type: boolean - description: >- - Indicates whether information about services should - be injected into Pod's environment variables. 
- topologySpreadConstraints: - type: array - items: - type: object - properties: - labelSelector: + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + description: The pod's affinity rules. 
+ tolerations: + type: array + items: type: object properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - maxSkew: - type: integer - topologyKey: - type: string - whenUnsatisfiable: - type: string - description: The pod's topology spread constraints. - description: >- - Template for Kafka Connect Build `Pods`. The build pod - is used only on Kubernetes. - clusterRoleBinding: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can be - applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - description: Metadata applied to the resource. - description: Template for the Kafka Connect ClusterRoleBinding. - connectContainer: - type: object - properties: - env: - type: array - items: - type: object - properties: - name: - type: string - description: The environment variable key. - value: - type: string - description: The environment variable value. - description: >- - Environment variables which should be applied to the - container. 
- securityContext: - type: object - properties: - allowPrivilegeEscalation: - type: boolean - capabilities: - type: object - properties: - add: - type: array - items: + effect: type: string - drop: - type: array - items: + key: type: string - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - type: integer - runAsNonRoot: - type: boolean - runAsUser: - type: integer - seLinuxOptions: - type: object - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - seccompProfile: - type: object - properties: - localhostProfile: - type: string - type: - type: string - windowsOptions: - type: object - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - description: Security context for the container. - description: Template for the Kafka Connect container. - initContainer: - type: object - properties: - env: - type: array - items: - type: object - properties: - name: - type: string - description: The environment variable key. - value: - type: string - description: The environment variable value. - description: >- - Environment variables which should be applied to the - container. - securityContext: - type: object - properties: - allowPrivilegeEscalation: - type: boolean - capabilities: - type: object - properties: - add: - type: array - items: + operator: type: string - drop: - type: array - items: + tolerationSeconds: + type: integer + value: + type: string + description: The pod's tolerations. + priorityClassName: + type: string + description: >- + The name of the priority class used to assign + priority to the pods. For more information about + priority classes, see {K8sPriorityClass}. + schedulerName: + type: string + description: >- + The name of the scheduler used to dispatch this + `Pod`. If not specified, the default scheduler + will be used. 
+ hostAliases: + type: array + items: + type: object + properties: + hostnames: + type: array + items: + type: string + ip: type: string - privileged: - type: boolean - procMount: + description: >- + The pod's HostAliases. HostAliases is an + optional list of hosts and IPs that will be + injected into the Pod's hosts file if specified. + tmpDirSizeLimit: type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - type: integer - runAsNonRoot: - type: boolean - runAsUser: - type: integer - seLinuxOptions: - type: object - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - seccompProfile: - type: object - properties: - localhostProfile: - type: string - type: - type: string - windowsOptions: - type: object - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - description: Security context for the container. - description: Template for the Kafka init container. - podDisruptionBudget: - type: object - properties: - metadata: - type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object + pattern: '^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$' description: >- - Labels added to the resource template. Can be - applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object + Defines the total amount (for example `1Gi`) of + local storage required for temporary EmptyDir + volume (`/tmp`). Default value is `5Mi`. + enableServiceLinks: + type: boolean description: >- - Annotations added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - description: >- - Metadata to apply to the - `PodDistruptionBugetTemplate` resource. 
- maxUnavailable: - type: integer - minimum: 0 - description: >- - Maximum number of unavailable pods to allow - automatic Pod eviction. A Pod eviction is allowed - when the `maxUnavailable` number of pods or fewer - are unavailable after the eviction. Setting this - value to 0 prevents all voluntary evictions, so the - pods must be evicted manually. Defaults to 1. - description: Template for Kafka Connect `PodDisruptionBudget`. - description: >- - Template for Kafka Connect and Kafka Connect S2I resources. - The template allows users to specify how the `Deployment`, - `Pods` and `Service` are generated. - externalConfiguration: - type: object - properties: - env: - type: array - items: - type: object - properties: - name: - type: string - description: >- - Name of the environment variable which will be - passed to the Kafka Connect pods. The name of the - environment variable cannot start with `KAFKA_` or - `STRIMZI_`. - valueFrom: - type: object - properties: - configMapKeyRef: + Indicates whether information about services + should be injected into Pod's environment + variables. + topologySpreadConstraints: + type: array + items: type: object properties: - key: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + matchLabelKeys: + type: array + items: + type: string + maxSkew: + type: integer + minDomains: + type: integer + nodeAffinityPolicy: type: string - name: + nodeTaintsPolicy: type: string - optional: - type: boolean - description: Reference to a key in a ConfigMap. - secretKeyRef: + topologyKey: + type: string + whenUnsatisfiable: + type: string + description: The pod's topology spread constraints. + description: Template for Entity Operator `Pods`. 
+ topicOperatorContainer: + type: object + properties: + env: + type: array + items: type: object properties: - key: - type: string name: type: string - optional: - type: boolean - description: Reference to a key in a Secret. - description: >- - Value of the environment variable which will be - passed to the Kafka Connect pods. It can be passed - either as a reference to Secret or ConfigMap - field. The field has to specify exactly one Secret - or ConfigMap. - required: - - name - - valueFrom - description: >- - Allows to pass data from Secret or ConfigMap to the - Kafka Connect pods as environment variables. - volumes: - type: array - items: - type: object - properties: - configMap: - type: object - properties: - defaultMode: - type: integer + description: The environment variable key. + value: + type: string + description: The environment variable value. + description: >- + Environment variables which should be applied to + the container. + securityContext: + type: object + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + type: object + properties: + add: + type: array + items: + type: string + drop: + type: array + items: + type: string + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Security context for the container. + description: Template for the Entity Topic Operator container. 
+ userOperatorContainer: + type: object + properties: + env: + type: array items: - type: array - items: + type: object + properties: + name: + type: string + description: The environment variable key. + value: + type: string + description: The environment variable value. + description: >- + Environment variables which should be applied to + the container. + securityContext: + type: object + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: type: object properties: - key: + add: + type: array + items: + type: string + drop: + type: array + items: + type: string + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: type: string - mode: - type: integer - path: + role: type: string - name: - type: string - optional: - type: boolean - description: >- - Reference to a key in a ConfigMap. Exactly one - Secret or ConfigMap has to be specified. - name: - type: string - description: >- - Name of the volume which will be added to the - Kafka Connect pods. - secret: - type: object - properties: - defaultMode: - type: integer - items: - type: array - items: + type: + type: string + user: + type: string + seccompProfile: type: object properties: - key: + localhostProfile: type: string - mode: - type: integer - path: + type: type: string - optional: - type: boolean - secretName: - type: string - description: >- - Reference to a key in a Secret. Exactly one Secret - or ConfigMap has to be specified. - required: - - name - description: >- - Allows to pass data from Secret or ConfigMap to the - Kafka Connect pods as volumes. - description: >- - Pass data from Secrets or ConfigMaps to the Kafka Connect - pods and use them to configure connectors. 
- build: - type: object - properties: - output: - type: object - properties: - additionalKanikoOptions: - type: array - items: - type: string - description: >- - Configures additional options which will be passed - to the Kaniko executor when building the new Connect - image. Allowed options are: --customPlatform, - --insecure, --insecure-pull, --insecure-registry, - --log-format, --log-timestamp, --registry-mirror, - --reproducible, --single-snapshot, - --skip-tls-verify, --skip-tls-verify-pull, - --skip-tls-verify-registry, --verbosity, - --snapshotMode, --use-new-run. These options will be - used only on Kubernetes where the Kaniko executor is - used. They will be ignored on OpenShift. The options - are described in the - link:https://github.com/GoogleContainerTools/kaniko[Kaniko - GitHub repository^]. Changing this field does not - trigger new build of the Kafka Connect image. - image: - type: string - description: The name of the image which will be built. Required. - pushSecret: - type: string - description: >- - Container Registry Secret with the credentials for - pushing the newly built image. - type: - type: string - enum: - - docker - - imagestream - description: >- - Output type. Must be either `docker` for pushing the - newly build image to Docker compatible registry or - `imagestream` for pushing the image to OpenShift - ImageStream. Required. - required: - - image - - type - description: >- - Configures where should the newly built image be stored. - Required. - resources: - type: object - properties: - limits: - x-kubernetes-preserve-unknown-fields: true - type: object - requests: - x-kubernetes-preserve-unknown-fields: true + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Security context for the container. + description: Template for the Entity User Operator container. 
+ tlsSidecarContainer: type: object - description: CPU and memory resources to reserve for the build. - plugins: - type: array - items: - type: object - properties: - name: - type: string - pattern: '^[a-z0-9][-_a-z0-9]*[a-z0-9]$' - description: >- - The unique name of the connector plugin. Will be - used to generate the path where the connector - artifacts will be stored. The name has to be - unique within the KafkaConnect resource. The name - has to follow the following pattern: - `^[a-z][-_a-z0-9]*[a-z]$`. Required. - artifacts: - type: array - items: + properties: + env: + type: array + items: + type: object + properties: + name: + type: string + description: The environment variable key. + value: + type: string + description: The environment variable value. + description: >- + Environment variables which should be applied to + the container. + securityContext: type: object properties: - sha512sum: - type: string - description: >- - SHA512 checksum of the artifact. Optional. - If specified, the checksum will be verified - while building the new container. If not - specified, the downloaded artifact will not - be verified. - type: - type: string - enum: - - jar - - tgz - - zip - description: >- - Artifact type. Currently, the supported - artifact types are `tgz`, `jar`, and `zip`. - url: - type: string - pattern: >- - ^(https?|ftp)://[-a-zA-Z0-9+&@#/%?=~_|!:,.;]*[-a-zA-Z0-9+&@#/%=~_|] - description: >- - URL of the artifact which will be - downloaded. Strimzi does not do any security - scanning of the downloaded artifacts. For - security reasons, you should first verify - the artifacts manually and configure the - checksum verification to make sure the same - artifact is used in the automated build. - Required. - required: - - type - - url - description: >- - List of artifacts which belong to this connector - plugin. Required. - required: - - name - - artifacts - description: >- - List of connector plugins which should be added to the - Kafka Connect. Required. 
- required: - - output - - plugins - description: >- - Configures how the Connect container image should be built. - Optional. - clientRackInitImage: - type: string - description: >- - The image of the init container used for initializing the - `client.rack`. - metricsConfig: - type: object - properties: - type: - type: string - enum: - - jmxPrometheusExporter - description: >- - Metrics type. Only 'jmxPrometheusExporter' supported - currently. - valueFrom: - type: object - properties: - configMapKeyRef: + allowPrivilegeEscalation: + type: boolean + capabilities: + type: object + properties: + add: + type: array + items: + type: string + drop: + type: array + items: + type: string + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Security context for the container. + description: >- + Template for the Entity Operator TLS sidecar + container. + serviceAccount: type: object properties: - key: - type: string - name: - type: string - optional: - type: boolean - description: >- - Reference to the key in the ConfigMap containing the - configuration. - description: >- - ConfigMap entry where the Prometheus JMX Exporter - configuration is stored. For details of the structure of - this configuration, see the {JMXExporter}. - required: - - type - - valueFrom - description: Metrics configuration. 
- rack: - type: object - properties: - topologyKey: - type: string - example: topology.kubernetes.io/zone - description: >- - A key that matches labels assigned to the Kubernetes - cluster nodes. The value of the label is used to set the - broker's `broker.rack` config and `client.rack` in Kafka - Connect. - required: - - topologyKey - description: >- - Configuration of the node label which will be used as the - client.rack consumer configuration. - required: - - bootstrapServers - description: The specification of the Kafka Connect cluster. - status: - type: object - properties: - conditions: - type: array - items: - type: object - properties: - type: - type: string - description: >- - The unique identifier of a condition, used to - distinguish between other conditions in the resource. - status: - type: string - description: >- - The status of the condition, either True, False or - Unknown. - lastTransitionTime: - type: string - description: >- - Last time the condition of a type changed from one - status to another. The required format is - 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. - reason: - type: string - description: >- - The reason for the condition's last transition (a - single word in CamelCase). - message: - type: string - description: >- - Human-readable message indicating details about the - condition's last transition. - description: List of status conditions. - observedGeneration: - type: integer - description: >- - The generation of the CRD that was last reconciled by the - operator. - url: - type: string - description: >- - The URL of the REST API endpoint for managing and monitoring - Kafka Connect connectors. - connectorPlugins: - type: array - items: - type: object - properties: - type: - type: string - description: >- - The type of the connector plugin. The available types - are `sink` and `source`. - version: - type: string - description: The version of the connector plugin. 
- class: - type: string - description: The class of the connector plugin. - description: >- - The list of connector plugins available in this Kafka - Connect deployment. - labelSelector: - type: string - description: Label selector for pods providing this resource. - replicas: - type: integer - description: >- - The current number of pods being used to provide this - resource. - description: The status of the Kafka Connect cluster. - ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: kafkamirrormakers.kafka.strimzi.io - labels: - app: strimzi - strimzi.io/crd-install: 'true' -spec: - group: kafka.strimzi.io - names: - kind: KafkaMirrorMaker - listKind: KafkaMirrorMakerList - singular: kafkamirrormaker - plural: kafkamirrormakers - shortNames: - - kmm - categories: - - strimzi - scope: Namespaced - conversion: - strategy: None - versions: - - name: v1beta2 - served: true - storage: true - subresources: - status: {} - scale: - specReplicasPath: .spec.replicas - statusReplicasPath: .status.replicas - labelSelectorPath: .status.labelSelector - additionalPrinterColumns: - - name: Desired replicas - description: The desired number of Kafka MirrorMaker replicas - jsonPath: .spec.replicas - type: integer - - name: Consumer Bootstrap Servers - description: The boostrap servers for the consumer - jsonPath: .spec.consumer.bootstrapServers - type: string - priority: 1 - - name: Producer Bootstrap Servers - description: The boostrap servers for the producer - jsonPath: .spec.producer.bootstrapServers - type: string - priority: 1 - - name: Ready - description: The state of the custom resource - jsonPath: '.status.conditions[?(@.type=="Ready")].status' - type: string - schema: - openAPIV3Schema: - type: object - properties: - spec: - type: object - properties: - version: - type: string - description: >- - The Kafka MirrorMaker version. Defaults to - {DefaultKafkaVersion}. 
Consult the documentation to - understand the process required to upgrade or downgrade the - version. - replicas: - type: integer - minimum: 0 - description: The number of pods in the `Deployment`. - image: - type: string - description: The docker image for the pods. - consumer: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + description: Template for the Entity Operator service account. + entityOperatorRole: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + description: Template for the Entity Operator Role. + topicOperatorRoleBinding: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. 
+ annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + description: Template for the Entity Topic Operator RoleBinding. + userOperatorRoleBinding: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + description: Template for the Entity Topic Operator RoleBinding. + description: >- + Template for Entity Operator resources. The template + allows users to specify how a `Deployment` and `Pod` is + generated. + description: Configuration of the Entity Operator. + clusterCa: type: object properties: - numStreams: + generateCertificateAuthority: + type: boolean + description: >- + If true then Certificate Authority certificates will be + generated automatically. Otherwise the user will need to + provide a Secret with the CA certificate. Default is + true. + generateSecretOwnerReference: + type: boolean + description: >- + If `true`, the Cluster and Client CA Secrets are + configured with the `ownerReference` set to the `Kafka` + resource. If the `Kafka` resource is deleted when + `true`, the CA Secrets are also deleted. If `false`, the + `ownerReference` is disabled. If the `Kafka` resource is + deleted when `false`, the CA Secrets are retained and + available for reuse. Default is `true`. 
+ validityDays: type: integer minimum: 1 description: >- - Specifies the number of consumer stream threads to - create. - offsetCommitInterval: + The number of days generated certificates should be + valid for. The default is 365. + renewalDays: type: integer + minimum: 1 description: >- - Specifies the offset auto-commit interval in ms. Default - value is 60000. - bootstrapServers: + The number of days in the certificate renewal period. + This is the number of days before the a certificate + expires during which renewal actions may be performed. + When `generateCertificateAuthority` is true, this will + cause the generation of a new certificate. When + `generateCertificateAuthority` is true, this will cause + extra logging at WARN level about the pending + certificate expiry. Default is 30. + certificateExpirationPolicy: type: string + enum: + - renew-certificate + - replace-key description: >- - A list of host:port pairs for establishing the initial - connection to the Kafka cluster. - groupId: + How should CA certificate expiration be handled when + `generateCertificateAuthority=true`. The default is for + a new CA certificate to be generated reusing the + existing private key. + description: Configuration of the cluster certificate authority. + clientsCa: + type: object + properties: + generateCertificateAuthority: + type: boolean + description: >- + If true then Certificate Authority certificates will be + generated automatically. Otherwise the user will need to + provide a Secret with the CA certificate. Default is + true. + generateSecretOwnerReference: + type: boolean + description: >- + If `true`, the Cluster and Client CA Secrets are + configured with the `ownerReference` set to the `Kafka` + resource. If the `Kafka` resource is deleted when + `true`, the CA Secrets are also deleted. If `false`, the + `ownerReference` is disabled. If the `Kafka` resource is + deleted when `false`, the CA Secrets are retained and + available for reuse. Default is `true`. 
+ validityDays: + type: integer + minimum: 1 + description: >- + The number of days generated certificates should be + valid for. The default is 365. + renewalDays: + type: integer + minimum: 1 + description: >- + The number of days in the certificate renewal period. + This is the number of days before the a certificate + expires during which renewal actions may be performed. + When `generateCertificateAuthority` is true, this will + cause the generation of a new certificate. When + `generateCertificateAuthority` is true, this will cause + extra logging at WARN level about the pending + certificate expiry. Default is 30. + certificateExpirationPolicy: + type: string + enum: + - renew-certificate + - replace-key + description: >- + How should CA certificate expiration be handled when + `generateCertificateAuthority=true`. The default is for + a new CA certificate to be generated reusing the + existing private key. + description: Configuration of the clients certificate authority. + cruiseControl: + type: object + properties: + image: type: string + description: The docker image for the pods. + tlsSidecar: + type: object + properties: + image: + type: string + description: The docker image for the container. + livenessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive failures for the probe to be + considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: >- + The initial delay before first the health is + first checked. Default to 15 seconds. Minimum + value is 0. + periodSeconds: + type: integer + minimum: 1 + description: >- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive successes for the probe to + be considered successful after having failed. + Defaults to 1. 
Must be 1 for liveness. Minimum + value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: >- + The timeout for each attempted health check. + Default to 5 seconds. Minimum value is 1. + description: Pod liveness checking. + logLevel: + type: string + enum: + - emerg + - alert + - crit + - err + - warning + - notice + - info + - debug + description: >- + The log level for the TLS sidecar. Default value is + `notice`. + readinessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive failures for the probe to be + considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: >- + The initial delay before first the health is + first checked. Default to 15 seconds. Minimum + value is 0. + periodSeconds: + type: integer + minimum: 1 + description: >- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive successes for the probe to + be considered successful after having failed. + Defaults to 1. Must be 1 for liveness. Minimum + value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: >- + The timeout for each attempted health check. + Default to 5 seconds. Minimum value is 1. + description: Pod readiness checking. + resources: + type: object + properties: + limits: + x-kubernetes-preserve-unknown-fields: true + type: object + requests: + x-kubernetes-preserve-unknown-fields: true + type: object + description: CPU and memory resources to reserve. + description: TLS sidecar configuration. + resources: + type: object + properties: + limits: + x-kubernetes-preserve-unknown-fields: true + type: object + requests: + x-kubernetes-preserve-unknown-fields: true + type: object description: >- - A unique string that identifies the consumer group this - consumer belongs to. 
- authentication: + CPU and memory resources to reserve for the Cruise + Control container. + livenessProbe: type: object properties: - accessToken: + failureThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive failures for the probe to be + considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: >- + The initial delay before first the health is first + checked. Default to 15 seconds. Minimum value is 0. + periodSeconds: + type: integer + minimum: 1 + description: >- + How often (in seconds) to perform the probe. Default + to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive successes for the probe to be + considered successful after having failed. Defaults + to 1. Must be 1 for liveness. Minimum value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: >- + The timeout for each attempted health check. Default + to 5 seconds. Minimum value is 1. + description: Pod liveness checking for the Cruise Control container. + readinessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive failures for the probe to be + considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: >- + The initial delay before first the health is first + checked. Default to 15 seconds. Minimum value is 0. + periodSeconds: + type: integer + minimum: 1 + description: >- + How often (in seconds) to perform the probe. Default + to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive successes for the probe to be + considered successful after having failed. Defaults + to 1. Must be 1 for liveness. Minimum value is 1. 
+ timeoutSeconds: + type: integer + minimum: 1 + description: >- + The timeout for each attempted health check. Default + to 5 seconds. Minimum value is 1. + description: Pod readiness checking for the Cruise Control container. + jvmOptions: + type: object + properties: + '-XX': + x-kubernetes-preserve-unknown-fields: true + type: object + description: A map of -XX options to the JVM. + '-Xms': + type: string + pattern: '^[0-9]+[mMgG]?$' + description: '-Xms option to to the JVM.' + '-Xmx': + type: string + pattern: '^[0-9]+[mMgG]?$' + description: '-Xmx option to to the JVM.' + gcLoggingEnabled: + type: boolean + description: >- + Specifies whether the Garbage Collection logging is + enabled. The default is false. + javaSystemProperties: + type: array + items: + type: object + properties: + name: + type: string + description: The system property name. + value: + type: string + description: The system property value. + description: >- + A map of additional system properties which will be + passed using the `-D` option to the JVM. + description: JVM Options for the Cruise Control container. + logging: + type: object + properties: + loggers: + x-kubernetes-preserve-unknown-fields: true + type: object + description: A Map from logger name to logger level. + type: + type: string + enum: + - inline + - external + description: 'Logging type, must be either ''inline'' or ''external''.' + valueFrom: type: object properties: - key: + configMapKeyRef: + type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean + description: >- + Reference to the key in the ConfigMap containing + the configuration. + description: >- + `ConfigMap` entry where the logging configuration is + stored. + required: + - type + description: Logging configuration (Log4j 2) for Cruise Control. 
+ template: + type: object + properties: + deployment: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + deploymentStrategy: + type: string + enum: + - RollingUpdate + - Recreate + description: >- + Pod replacement strategy for deployment + configuration changes. Valid values are + `RollingUpdate` and `Recreate`. Defaults to + `RollingUpdate`. + description: Template for Cruise Control `Deployment`. + pod: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + imagePullSecrets: + type: array + items: + type: object + properties: + name: + type: string + description: >- + List of references to secrets in the same + namespace to use for pulling any of the images + used by this Pod. 
When the + `STRIMZI_IMAGE_PULL_SECRETS` environment + variable in Cluster Operator and the + `imagePullSecrets` option are specified, only + the `imagePullSecrets` variable is used and the + `STRIMZI_IMAGE_PULL_SECRETS` variable is + ignored. + securityContext: + type: object + properties: + fsGroup: + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + supplementalGroups: + type: array + items: + type: integer + sysctls: + type: array + items: + type: object + properties: + name: + type: string + value: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: >- + Configures pod-level security attributes and + common container settings. + terminationGracePeriodSeconds: + type: integer + minimum: 0 + description: >- + The grace period is the duration in seconds + after the processes running in the pod are sent + a termination signal, and the time when the + processes are forcibly halted with a kill + signal. Set this value to longer than the + expected cleanup time for your process. Value + must be a non-negative integer. A zero value + indicates delete immediately. You might need to + increase the grace period for very large Kafka + clusters, so that the Kafka brokers have enough + time to transfer their work to another broker + before they are terminated. Defaults to 30 + seconds. 
+ affinity: + type: object + properties: + nodeAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + preference: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: object + properties: + nodeSelectorTerms: + type: array + items: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + podAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + 
requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + podAntiAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + 
x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + description: The pod's affinity rules. + tolerations: + type: array + items: + type: object + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + type: integer + value: + type: string + description: The pod's tolerations. + priorityClassName: type: string description: >- - The key under which the secret value is stored - in the Kubernetes Secret. - secretName: + The name of the priority class used to assign + priority to the pods. For more information about + priority classes, see {K8sPriorityClass}. + schedulerName: type: string description: >- - The name of the Kubernetes Secret containing the - secret value. - required: - - key - - secretName - description: >- - Link to Kubernetes Secret containing the access - token which was obtained from the authorization - server. - accessTokenIsJwt: - type: boolean - description: >- - Configure whether access token should be treated as - JWT. This should be set to `false` if the - authorization server returns opaque tokens. Defaults - to `true`. - certificateAndKey: + The name of the scheduler used to dispatch this + `Pod`. If not specified, the default scheduler + will be used. + hostAliases: + type: array + items: + type: object + properties: + hostnames: + type: array + items: + type: string + ip: + type: string + description: >- + The pod's HostAliases. HostAliases is an + optional list of hosts and IPs that will be + injected into the Pod's hosts file if specified. 
+ tmpDirSizeLimit: + type: string + pattern: '^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$' + description: >- + Defines the total amount (for example `1Gi`) of + local storage required for temporary EmptyDir + volume (`/tmp`). Default value is `5Mi`. + enableServiceLinks: + type: boolean + description: >- + Indicates whether information about services + should be injected into Pod's environment + variables. + topologySpreadConstraints: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + matchLabelKeys: + type: array + items: + type: string + maxSkew: + type: integer + minDomains: + type: integer + nodeAffinityPolicy: + type: string + nodeTaintsPolicy: + type: string + topologyKey: + type: string + whenUnsatisfiable: + type: string + description: The pod's topology spread constraints. + description: Template for Cruise Control `Pods`. + apiService: type: object properties: - certificate: - type: string - description: The name of the file certificate in the Secret. - key: - type: string - description: The name of the private key in the Secret. - secretName: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. 
+ ipFamilyPolicy: type: string + enum: + - SingleStack + - PreferDualStack + - RequireDualStack description: >- - The name of the Secret containing the - certificate. - required: - - certificate - - key - - secretName - description: >- - Reference to the `Secret` which holds the - certificate and private key pair. - clientId: - type: string - description: >- - OAuth Client ID which the Kafka client can use to - authenticate against the OAuth server and use the - token endpoint URI. - clientSecret: + Specifies the IP Family Policy used by the + service. Available options are `SingleStack`, + `PreferDualStack` and `RequireDualStack`. + `SingleStack` is for a single IP family. + `PreferDualStack` is for two IP families on + dual-stack configured clusters or a single IP + family on single-stack clusters. + `RequireDualStack` fails unless there are two IP + families on dual-stack configured clusters. If + unspecified, Kubernetes will choose the default + value based on the service type. Available on + Kubernetes 1.20 and newer. + ipFamilies: + type: array + items: + type: string + enum: + - IPv4 + - IPv6 + description: >- + Specifies the IP Families used by the service. + Available options are `IPv4` and `IPv6. If + unspecified, Kubernetes will choose the default + value based on the `ipFamilyPolicy` setting. + Available on Kubernetes 1.20 and newer. + description: Template for Cruise Control API `Service`. + podDisruptionBudget: type: object properties: - key: - type: string + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. 
+ Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. description: >- - The key under which the secret value is stored - in the Kubernetes Secret. - secretName: - type: string + Metadata to apply to the + `PodDisruptionBudgetTemplate` resource. + maxUnavailable: + type: integer + minimum: 0 description: >- - The name of the Kubernetes Secret containing the - secret value. - required: - - key - - secretName - description: >- - Link to Kubernetes Secret containing the OAuth - client secret which the Kafka client can use to - authenticate against the OAuth server and use the - token endpoint URI. - disableTlsHostnameVerification: - type: boolean - description: >- - Enable or disable TLS hostname verification. Default - value is `false`. - maxTokenExpirySeconds: - type: integer - description: >- - Set or limit time-to-live of the access tokens to - the specified number of seconds. This should be set - if the authorization server returns opaque tokens. - passwordSecret: + Maximum number of unavailable pods to allow + automatic Pod eviction. A Pod eviction is + allowed when the `maxUnavailable` number of pods + or fewer are unavailable after the eviction. + Setting this value to 0 prevents all voluntary + evictions, so the pods must be evicted manually. + Defaults to 1. + description: Template for Cruise Control `PodDisruptionBudget`. + cruiseControlContainer: type: object properties: - password: - type: string + env: + type: array + items: + type: object + properties: + name: + type: string + description: The environment variable key. + value: + type: string + description: The environment variable value. description: >- - The name of the key in the Secret under which - the password is stored. - secretName: - type: string - description: The name of the Secret containing the password. - required: - - password - - secretName - description: Reference to the `Secret` which holds the password. 
- refreshToken: + Environment variables which should be applied to + the container. + securityContext: + type: object + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + type: object + properties: + add: + type: array + items: + type: string + drop: + type: array + items: + type: string + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Security context for the container. + description: Template for the Cruise Control container. + tlsSidecarContainer: type: object properties: - key: - type: string - description: >- - The key under which the secret value is stored - in the Kubernetes Secret. - secretName: - type: string + env: + type: array + items: + type: object + properties: + name: + type: string + description: The environment variable key. + value: + type: string + description: The environment variable value. description: >- - The name of the Kubernetes Secret containing the - secret value. - required: - - key - - secretName + Environment variables which should be applied to + the container. 
+ securityContext: + type: object + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + type: object + properties: + add: + type: array + items: + type: string + drop: + type: array + items: + type: string + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Security context for the container. + description: >- + Template for the Cruise Control TLS sidecar + container. + serviceAccount: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + description: Template for the Cruise Control service account. + description: >- + Template to specify how Cruise Control resources, + `Deployments` and `Pods`, are generated. 
+ brokerCapacity: + type: object + properties: + disk: + type: string + pattern: '^[0-9]+([.][0-9]*)?([KMGTPE]i?|e[0-9]+)?$' description: >- - Link to Kubernetes Secret containing the refresh - token which can be used to obtain access token from - the authorization server. - scope: + Broker capacity for disk in bytes. Use a number + value with either standard Kubernetes byte units (K, + M, G, or T), their bibyte (power of two) equivalents + (Ki, Mi, Gi, or Ti), or a byte value with or without + E notation. For example, 100000M, 100000Mi, + 104857600000, or 1e+11. + cpuUtilization: + type: integer + minimum: 0 + maximum: 100 + description: >- + Broker capacity for CPU resource utilization as a + percentage (0 - 100). + cpu: type: string + pattern: '^[0-9]+([.][0-9]{0,3}|[m]?)$' description: >- - OAuth scope to use when authenticating against the - authorization server. Some authorization servers - require this to be set. The possible values depend - on how authorization server is configured. By - default `scope` is not specified when doing the - token endpoint request. - tlsTrustedCertificates: + Broker capacity for CPU resource in cores or + millicores. For example, 1, 1.500, 1500m. For more + information on valid CPU resource units see + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + inboundNetwork: + type: string + pattern: '^[0-9]+([KMG]i?)?B/s$' + description: >- + Broker capacity for inbound network throughput in + bytes per second. Use an integer value with standard + Kubernetes byte units (K, M, G) or their bibyte + (power of two) equivalents (Ki, Mi, Gi) per second. + For example, 10000KiB/s. + outboundNetwork: + type: string + pattern: '^[0-9]+([KMG]i?)?B/s$' + description: >- + Broker capacity for outbound network throughput in + bytes per second. Use an integer value with standard + Kubernetes byte units (K, M, G) or their bibyte + (power of two) equivalents (Ki, Mi, Gi) per second. 
+ For example, 10000KiB/s. + overrides: type: array items: type: object properties: - certificate: + brokers: + type: array + items: + type: integer + description: List of Kafka brokers (broker identifiers). + cpu: type: string + pattern: '^[0-9]+([.][0-9]{0,3}|[m]?)$' description: >- - The name of the file certificate in the - Secret. - secretName: + Broker capacity for CPU resource in cores or + millicores. For example, 1, 1.500, 1500m. For + more information on valid CPU resource units + see + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + inboundNetwork: type: string + pattern: '^[0-9]+([KMG]i?)?B/s$' description: >- - The name of the Secret containing the - certificate. + Broker capacity for inbound network throughput + in bytes per second. Use an integer value with + standard Kubernetes byte units (K, M, G) or + their bibyte (power of two) equivalents (Ki, + Mi, Gi) per second. For example, 10000KiB/s. + outboundNetwork: + type: string + pattern: '^[0-9]+([KMG]i?)?B/s$' + description: >- + Broker capacity for outbound network + throughput in bytes per second. Use an integer + value with standard Kubernetes byte units (K, + M, G) or their bibyte (power of two) + equivalents (Ki, Mi, Gi) per second. For + example, 10000KiB/s. required: - - certificate - - secretName - description: >- - Trusted certificates for TLS connection to the OAuth - server. - tokenEndpointUri: - type: string - description: Authorization server token endpoint URI. - type: - type: string - enum: - - tls - - scram-sha-512 - - plain - - oauth + - brokers description: >- - Authentication type. Currently the only supported - types are `tls`, `scram-sha-512`, and `plain`. - `scram-sha-512` type uses SASL SCRAM-SHA-512 - Authentication. `plain` type uses SASL PLAIN - Authentication. `oauth` type uses SASL OAUTHBEARER - Authentication. The `tls` type uses TLS Client - Authentication. The `tls` type is supported only - over TLS connections. 
- username: - type: string - description: Username used for the authentication. - required: - - type - description: >- - Authentication configuration for connecting to the - cluster. + Overrides for individual brokers. The `overrides` + property lets you specify a different capacity + configuration for different brokers. + description: The Cruise Control `brokerCapacity` configuration. config: x-kubernetes-preserve-unknown-fields: true type: object description: >- - The MirrorMaker consumer config. Properties with the - following prefixes cannot be set: ssl., - bootstrap.servers, group.id, sasl., security., - interceptor.classes (with the exception of: - ssl.endpoint.identification.algorithm, - ssl.cipher.suites, ssl.protocol, ssl.enabled.protocols). - tls: + The Cruise Control configuration. For a full list of + configuration options refer to + https://github.com/linkedin/cruise-control/wiki/Configurations. + Note that properties with the following prefixes cannot + be set: bootstrap.servers, client.id, zookeeper., + network., security., + failed.brokers.zk.path,webserver.http., + webserver.api.urlprefix, webserver.session.path, + webserver.accesslog., two.step., + request.reason.required,metric.reporter.sampler.bootstrap.servers, + capacity.config.file, self.healing., ssl., + kafka.broker.failure.detection.enable, + topic.config.provider.class (with the exception of: + ssl.cipher.suites, ssl.protocol, ssl.enabled.protocols, + webserver.http.cors.enabled, webserver.http.cors.origin, + webserver.http.cors.exposeheaders, + webserver.security.enable, webserver.ssl.enable). + metricsConfig: type: object properties: - trustedCertificates: - type: array - items: - type: object - properties: - certificate: - type: string - description: >- - The name of the file certificate in the - Secret. - secretName: - type: string - description: >- - The name of the Secret containing the - certificate. 
- required: - - certificate - - secretName - description: Trusted certificates for TLS connection. - description: >- - TLS configuration for connecting MirrorMaker to the - cluster. - required: - - bootstrapServers - - groupId - description: Configuration of source cluster. - producer: + type: + type: string + enum: + - jmxPrometheusExporter + description: >- + Metrics type. Only 'jmxPrometheusExporter' supported + currently. + valueFrom: + type: object + properties: + configMapKeyRef: + type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean + description: >- + Reference to the key in the ConfigMap containing + the configuration. + description: >- + ConfigMap entry where the Prometheus JMX Exporter + configuration is stored. For details of the + structure of this configuration, see the + {JMXExporter}. + required: + - type + - valueFrom + description: Metrics configuration. + description: >- + Configuration for Cruise Control deployment. Deploys a + Cruise Control instance when specified. + jmxTrans: type: object properties: - bootstrapServers: + image: + type: string + description: The image to use for the JmxTrans. + outputDefinitions: + type: array + items: + type: object + properties: + outputType: + type: string + description: >- + Template for setting the format of the data that + will be pushed.For more information see + https://github.com/jmxtrans/jmxtrans/wiki/OutputWriters[JmxTrans + OutputWriters]. + host: + type: string + description: >- + The DNS/hostname of the remote host that the data + is pushed to. + port: + type: integer + description: >- + The port of the remote host that the data is + pushed to. + flushDelayInSeconds: + type: integer + description: >- + How many seconds the JmxTrans waits before pushing + a new set of data out. + typeNames: + type: array + items: + type: string + description: >- + Template for filtering data to be included in + response to a wildcard query. 
For more information + see + https://github.com/jmxtrans/jmxtrans/wiki/Queries[JmxTrans + queries]. + name: + type: string + description: >- + Template for setting the name of the output + definition. This is used to identify where to send + the results of queries should be sent. + required: + - outputType + - name + description: >- + Defines the output hosts that will be referenced later + on. For more information on these properties see, + xref:type-JmxTransOutputDefinitionTemplate-reference[`JmxTransOutputDefinitionTemplate` + schema reference]. + logLevel: type: string description: >- - A list of host:port pairs for establishing the initial - connection to the Kafka cluster. - abortOnSendFailure: - type: boolean + Sets the logging level of the JmxTrans deployment.For + more information see, + https://github.com/jmxtrans/jmxtrans-agent/wiki/Troubleshooting[JmxTrans + Logging Level]. + kafkaQueries: + type: array + items: + type: object + properties: + targetMBean: + type: string + description: >- + If using wildcards instead of a specific MBean + then the data is gathered from multiple MBeans. + Otherwise if specifying an MBean then data is + gathered from that specified MBean. + attributes: + type: array + items: + type: string + description: >- + Determine which attributes of the targeted MBean + should be included. + outputs: + type: array + items: + type: string + description: >- + List of the names of output definitions specified + in the spec.kafka.jmxTrans.outputDefinitions that + have defined where JMX metrics are pushed to, and + in which data format. + required: + - targetMBean + - attributes + - outputs description: >- - Flag to set the MirrorMaker to exit on a failed send. - Default value is `true`. - authentication: + Queries to send to the Kafka brokers to define what data + should be read from each broker. For more information on + these properties see, + xref:type-JmxTransQueryTemplate-reference[`JmxTransQueryTemplate` + schema reference]. 
+ resources: type: object properties: - accessToken: + limits: + x-kubernetes-preserve-unknown-fields: true type: object - properties: - key: - type: string - description: >- - The key under which the secret value is stored - in the Kubernetes Secret. - secretName: - type: string - description: >- - The name of the Kubernetes Secret containing the - secret value. - required: - - key - - secretName - description: >- - Link to Kubernetes Secret containing the access - token which was obtained from the authorization - server. - accessTokenIsJwt: - type: boolean - description: >- - Configure whether access token should be treated as - JWT. This should be set to `false` if the - authorization server returns opaque tokens. Defaults - to `true`. - certificateAndKey: + requests: + x-kubernetes-preserve-unknown-fields: true + type: object + description: CPU and memory resources to reserve. + template: + type: object + properties: + deployment: type: object properties: - certificate: - type: string - description: The name of the file certificate in the Secret. - key: - type: string - description: The name of the private key in the Secret. - secretName: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + deploymentStrategy: type: string + enum: + - RollingUpdate + - Recreate description: >- - The name of the Secret containing the - certificate. 
- required: - - certificate - - key - - secretName - description: >- - Reference to the `Secret` which holds the - certificate and private key pair. - clientId: - type: string - description: >- - OAuth Client ID which the Kafka client can use to - authenticate against the OAuth server and use the - token endpoint URI. - clientSecret: + Pod replacement strategy for deployment + configuration changes. Valid values are + `RollingUpdate` and `Recreate`. Defaults to + `RollingUpdate`. + description: Template for JmxTrans `Deployment`. + pod: type: object properties: - key: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + imagePullSecrets: + type: array + items: + type: object + properties: + name: + type: string + description: >- + List of references to secrets in the same + namespace to use for pulling any of the images + used by this Pod. When the + `STRIMZI_IMAGE_PULL_SECRETS` environment + variable in Cluster Operator and the + `imagePullSecrets` option are specified, only + the `imagePullSecrets` variable is used and the + `STRIMZI_IMAGE_PULL_SECRETS` variable is + ignored. 
+ securityContext: + type: object + properties: + fsGroup: + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + supplementalGroups: + type: array + items: + type: integer + sysctls: + type: array + items: + type: object + properties: + name: + type: string + value: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: >- + Configures pod-level security attributes and + common container settings. + terminationGracePeriodSeconds: + type: integer + minimum: 0 + description: >- + The grace period is the duration in seconds + after the processes running in the pod are sent + a termination signal, and the time when the + processes are forcibly halted with a kill + signal. Set this value to longer than the + expected cleanup time for your process. Value + must be a non-negative integer. A zero value + indicates delete immediately. You might need to + increase the grace period for very large Kafka + clusters, so that the Kafka brokers have enough + time to transfer their work to another broker + before they are terminated. Defaults to 30 + seconds. 
+ affinity: + type: object + properties: + nodeAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + preference: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: object + properties: + nodeSelectorTerms: + type: array + items: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + podAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + 
requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + podAntiAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + 
x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + description: The pod's affinity rules. + tolerations: + type: array + items: + type: object + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + type: integer + value: + type: string + description: The pod's tolerations. + priorityClassName: type: string description: >- - The key under which the secret value is stored - in the Kubernetes Secret. - secretName: + The name of the priority class used to assign + priority to the pods. For more information about + priority classes, see {K8sPriorityClass}. + schedulerName: type: string description: >- - The name of the Kubernetes Secret containing the - secret value. - required: - - key - - secretName - description: >- - Link to Kubernetes Secret containing the OAuth - client secret which the Kafka client can use to - authenticate against the OAuth server and use the - token endpoint URI. - disableTlsHostnameVerification: - type: boolean - description: >- - Enable or disable TLS hostname verification. Default - value is `false`. - maxTokenExpirySeconds: - type: integer - description: >- - Set or limit time-to-live of the access tokens to - the specified number of seconds. This should be set - if the authorization server returns opaque tokens. - passwordSecret: - type: object - properties: - password: - type: string + The name of the scheduler used to dispatch this + `Pod`. If not specified, the default scheduler + will be used. 
+ hostAliases: + type: array + items: + type: object + properties: + hostnames: + type: array + items: + type: string + ip: + type: string description: >- - The name of the key in the Secret under which - the password is stored. - secretName: - type: string - description: The name of the Secret containing the password. - required: - - password - - secretName - description: Reference to the `Secret` which holds the password. - refreshToken: - type: object - properties: - key: + The pod's HostAliases. HostAliases is an + optional list of hosts and IPs that will be + injected into the Pod's hosts file if specified. + tmpDirSizeLimit: type: string + pattern: '^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$' description: >- - The key under which the secret value is stored - in the Kubernetes Secret. - secretName: - type: string + Defines the total amount (for example `1Gi`) of + local storage required for temporary EmptyDir + volume (`/tmp`). Default value is `5Mi`. + enableServiceLinks: + type: boolean description: >- - The name of the Kubernetes Secret containing the - secret value. - required: - - key - - secretName - description: >- - Link to Kubernetes Secret containing the refresh - token which can be used to obtain access token from - the authorization server. - scope: - type: string - description: >- - OAuth scope to use when authenticating against the - authorization server. Some authorization servers - require this to be set. The possible values depend - on how authorization server is configured. By - default `scope` is not specified when doing the - token endpoint request. - tlsTrustedCertificates: - type: array - items: - type: object - properties: - certificate: - type: string - description: >- - The name of the file certificate in the - Secret. - secretName: - type: string - description: >- - The name of the Secret containing the - certificate. 
- required: - - certificate - - secretName - description: >- - Trusted certificates for TLS connection to the OAuth - server. - tokenEndpointUri: - type: string - description: Authorization server token endpoint URI. - type: - type: string - enum: - - tls - - scram-sha-512 - - plain - - oauth - description: >- - Authentication type. Currently the only supported - types are `tls`, `scram-sha-512`, and `plain`. - `scram-sha-512` type uses SASL SCRAM-SHA-512 - Authentication. `plain` type uses SASL PLAIN - Authentication. `oauth` type uses SASL OAUTHBEARER - Authentication. The `tls` type uses TLS Client - Authentication. The `tls` type is supported only - over TLS connections. - username: - type: string - description: Username used for the authentication. - required: - - type - description: >- - Authentication configuration for connecting to the - cluster. - config: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - The MirrorMaker producer config. Properties with the - following prefixes cannot be set: ssl., - bootstrap.servers, sasl., security., interceptor.classes - (with the exception of: - ssl.endpoint.identification.algorithm, - ssl.cipher.suites, ssl.protocol, ssl.enabled.protocols). - tls: - type: object - properties: - trustedCertificates: - type: array - items: - type: object - properties: - certificate: - type: string - description: >- - The name of the file certificate in the - Secret. - secretName: - type: string - description: >- - The name of the Secret containing the - certificate. - required: - - certificate - - secretName - description: Trusted certificates for TLS connection. - description: >- - TLS configuration for connecting MirrorMaker to the - cluster. - required: - - bootstrapServers - description: Configuration of target cluster. 
- resources: - type: object - properties: - limits: - x-kubernetes-preserve-unknown-fields: true - type: object - requests: - x-kubernetes-preserve-unknown-fields: true - type: object - description: CPU and memory resources to reserve. - whitelist: - type: string - description: >- - List of topics which are included for mirroring. This option - allows any regular expression using Java-style regular - expressions. Mirroring two topics named A and B is achieved - by using the whitelist `'A\|B'`. Or, as a special case, you - can mirror all topics using the whitelist '*'. You can also - specify multiple regular expressions separated by commas. - jvmOptions: - type: object - properties: - '-XX': - x-kubernetes-preserve-unknown-fields: true - type: object - description: A map of -XX options to the JVM. - '-Xms': - type: string - pattern: '[0-9]+[mMgG]?' - description: '-Xms option to to the JVM.' - '-Xmx': - type: string - pattern: '[0-9]+[mMgG]?' - description: '-Xmx option to to the JVM.' - gcLoggingEnabled: - type: boolean - description: >- - Specifies whether the Garbage Collection logging is - enabled. The default is false. - javaSystemProperties: - type: array - items: - type: object - properties: - name: - type: string - description: The system property name. - value: - type: string - description: The system property value. - description: >- - A map of additional system properties which will be - passed using the `-D` option to the JVM. - description: JVM Options for pods. - logging: - type: object - properties: - loggers: - x-kubernetes-preserve-unknown-fields: true - type: object - description: A Map from logger name to logger level. - type: - type: string - enum: - - inline - - external - description: 'Logging type, must be either ''inline'' or ''external''.' - valueFrom: - type: object - properties: - configMapKeyRef: + Indicates whether information about services + should be injected into Pod's environment + variables. 
+ topologySpreadConstraints: + type: array + items: + type: object + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + matchLabelKeys: + type: array + items: + type: string + maxSkew: + type: integer + minDomains: + type: integer + nodeAffinityPolicy: + type: string + nodeTaintsPolicy: + type: string + topologyKey: + type: string + whenUnsatisfiable: + type: string + description: The pod's topology spread constraints. + description: Template for JmxTrans `Pods`. + container: type: object properties: - key: - type: string - name: - type: string - optional: - type: boolean - description: >- - Reference to the key in the ConfigMap containing the - configuration. - description: >- - `ConfigMap` entry where the logging configuration is - stored. - required: - - type - description: Logging configuration for MirrorMaker. - metricsConfig: - type: object - properties: - type: - type: string - enum: - - jmxPrometheusExporter - description: >- - Metrics type. Only 'jmxPrometheusExporter' supported - currently. - valueFrom: - type: object - properties: - configMapKeyRef: + env: + type: array + items: + type: object + properties: + name: + type: string + description: The environment variable key. + value: + type: string + description: The environment variable value. + description: >- + Environment variables which should be applied to + the container. 
+ securityContext: + type: object + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + type: object + properties: + add: + type: array + items: + type: string + drop: + type: array + items: + type: string + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Security context for the container. + description: Template for JmxTrans container. + serviceAccount: type: object properties: - key: - type: string - name: - type: string - optional: - type: boolean - description: >- - Reference to the key in the ConfigMap containing the - configuration. - description: >- - ConfigMap entry where the Prometheus JMX Exporter - configuration is stored. For details of the structure of - this configuration, see the {JMXExporter}. + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + description: Template for the JmxTrans service account. 
+ description: Template for JmxTrans resources. required: - - type - - valueFrom - description: Metrics configuration. - tracing: + - outputDefinitions + - kafkaQueries + description: >- + Configuration for JmxTrans. When the property is present a + JmxTrans deployment is created for gathering JMX metrics + from each Kafka broker. For more information see + https://github.com/jmxtrans/jmxtrans[JmxTrans GitHub]. + kafkaExporter: type: object properties: - type: + image: + type: string + description: The docker image for the pods. + groupRegex: type: string - enum: - - jaeger description: >- - Type of the tracing used. Currently the only supported - type is `jaeger` for Jaeger tracing. - required: - - type - description: The configuration of tracing in Kafka MirrorMaker. - template: - type: object - properties: - deployment: + Regular expression to specify which consumer groups to + collect. Default value is `.*`. + topicRegex: + type: string + description: >- + Regular expression to specify which topics to collect. + Default value is `.*`. + resources: type: object properties: - metadata: + limits: + x-kubernetes-preserve-unknown-fields: true type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can be - applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - description: Metadata applied to the resource. - deploymentStrategy: - type: string - enum: - - RollingUpdate - - Recreate - description: >- - DeploymentStrategy which will be used for this - Deployment. Valid values are `RollingUpdate` and - `Recreate`. Defaults to `RollingUpdate`. 
- description: Template for Kafka MirrorMaker `Deployment`. - pod: - type: object - properties: - metadata: + requests: + x-kubernetes-preserve-unknown-fields: true type: object - properties: - labels: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Labels added to the resource template. Can be - applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - annotations: - x-kubernetes-preserve-unknown-fields: true - type: object - description: >- - Annotations added to the resource template. Can - be applied to different resources such as - `StatefulSets`, `Deployments`, `Pods`, and - `Services`. - description: Metadata applied to the resource. - imagePullSecrets: - type: array - items: - type: object - properties: - name: - type: string - description: >- - List of references to secrets in the same namespace - to use for pulling any of the images used by this - Pod. When the `STRIMZI_IMAGE_PULL_SECRETS` - environment variable in Cluster Operator and the - `imagePullSecrets` option are specified, only the - `imagePullSecrets` variable is used and the - `STRIMZI_IMAGE_PULL_SECRETS` variable is ignored. - securityContext: + description: CPU and memory resources to reserve. + logging: + type: string + description: >- + Only log messages with the given severity or above. + Valid levels: [`info`, `debug`, `trace`]. Default log + level is `info`. + enableSaramaLogging: + type: boolean + description: >- + Enable Sarama logging, a Go client library used by the + Kafka Exporter. 
+ template: + type: object + properties: + deployment: type: object properties: - fsGroup: - type: integer - fsGroupChangePolicy: - type: string - runAsGroup: - type: integer - runAsNonRoot: - type: boolean - runAsUser: - type: integer - seLinuxOptions: + metadata: type: object properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - seccompProfile: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + deploymentStrategy: + type: string + enum: + - RollingUpdate + - Recreate + description: >- + Pod replacement strategy for deployment + configuration changes. Valid values are + `RollingUpdate` and `Recreate`. Defaults to + `RollingUpdate`. + description: Template for Kafka Exporter `Deployment`. + pod: + type: object + properties: + metadata: type: object properties: - localhostProfile: - type: string - type: - type: string - supplementalGroups: - type: array - items: - type: integer - sysctls: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. 
+ imagePullSecrets: type: array items: type: object properties: name: type: string - value: - type: string - windowsOptions: + description: >- + List of references to secrets in the same + namespace to use for pulling any of the images + used by this Pod. When the + `STRIMZI_IMAGE_PULL_SECRETS` environment + variable in Cluster Operator and the + `imagePullSecrets` option are specified, only + the `imagePullSecrets` variable is used and the + `STRIMZI_IMAGE_PULL_SECRETS` variable is + ignored. + securityContext: type: object properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: + fsGroup: + type: integer + fsGroupChangePolicy: type: string - description: >- - Configures pod-level security attributes and common - container settings. - terminationGracePeriodSeconds: - type: integer - minimum: 0 - description: >- - The grace period is the duration in seconds after - the processes running in the pod are sent a - termination signal, and the time when the processes - are forcibly halted with a kill signal. Set this - value to longer than the expected cleanup time for - your process. Value must be a non-negative integer. - A zero value indicates delete immediately. You might - need to increase the grace period for very large - Kafka clusters, so that the Kafka brokers have - enough time to transfer their work to another broker - before they are terminated. Defaults to 30 seconds. 
- affinity: - type: object - properties: - nodeAffinity: - type: object - properties: - preferredDuringSchedulingIgnoredDuringExecution: + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + supplementalGroups: + type: array + items: + type: integer + sysctls: type: array items: type: object properties: - preference: + name: + type: string + value: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: >- + Configures pod-level security attributes and + common container settings. + terminationGracePeriodSeconds: + type: integer + minimum: 0 + description: >- + The grace period is the duration in seconds + after the processes running in the pod are sent + a termination signal, and the time when the + processes are forcibly halted with a kill + signal. Set this value to longer than the + expected cleanup time for your process. Value + must be a non-negative integer. A zero value + indicates delete immediately. You might need to + increase the grace period for very large Kafka + clusters, so that the Kafka brokers have enough + time to transfer their work to another broker + before they are terminated. Defaults to 30 + seconds. 
+ affinity: + type: object + properties: + nodeAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: type: object properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchFields: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - weight: - type: integer - requiredDuringSchedulingIgnoredDuringExecution: + preference: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: object + properties: + nodeSelectorTerms: + type: array + items: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchFields: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + podAffinity: type: object properties: - nodeSelectorTerms: + preferredDuringSchedulingIgnoredDuringExecution: type: array items: type: object properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchFields: - type: array - items: - type: object - properties: - key: - type: string - operator: + podAffinityTerm: + type: object + properties: + labelSelector: + type: object + 
properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: type: string - values: - type: array - items: - type: string - podAffinity: - type: object - properties: - preferredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - podAffinityTerm: + topologyKey: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: type: object properties: labelSelector: @@ -17056,53 +16450,91 @@ spec: matchLabels: x-kubernetes-preserve-unknown-fields: true type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object namespaces: type: array items: type: string topologyKey: type: string - weight: - type: integer - requiredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - labelSelector: + podAntiAffinity: + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + type: array + items: type: object properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true + podAffinityTerm: type: 
object - namespaces: - type: array - items: - type: string - topologyKey: - type: string - podAntiAffinity: - type: object - properties: - preferredDuringSchedulingIgnoredDuringExecution: - type: array - items: - type: object - properties: - podAffinityTerm: + properties: + labelSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + namespaces: + type: array + items: + type: string + topologyKey: + type: string + weight: + type: integer + requiredDuringSchedulingIgnoredDuringExecution: + type: array + items: type: object properties: labelSelector: @@ -17124,200 +16556,1970 @@ spec: matchLabels: x-kubernetes-preserve-unknown-fields: true type: object + namespaceSelector: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object namespaces: type: array items: type: string topologyKey: type: string - weight: - type: integer - requiredDuringSchedulingIgnoredDuringExecution: - type: array - items: + description: The pod's affinity rules. + tolerations: + type: array + items: + type: object + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + type: integer + value: + type: string + description: The pod's tolerations. 
+ priorityClassName: + type: string + description: >- + The name of the priority class used to assign + priority to the pods. For more information about + priority classes, see {K8sPriorityClass}. + schedulerName: + type: string + description: >- + The name of the scheduler used to dispatch this + `Pod`. If not specified, the default scheduler + will be used. + hostAliases: + type: array + items: + type: object + properties: + hostnames: + type: array + items: + type: string + ip: + type: string + description: >- + The pod's HostAliases. HostAliases is an + optional list of hosts and IPs that will be + injected into the Pod's hosts file if specified. + tmpDirSizeLimit: + type: string + pattern: '^([0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$' + description: >- + Defines the total amount (for example `1Gi`) of + local storage required for temporary EmptyDir + volume (`/tmp`). Default value is `5Mi`. + enableServiceLinks: + type: boolean + description: >- + Indicates whether information about services + should be injected into Pod's environment + variables. 
+ topologySpreadConstraints: + type: array + items: + type: object + properties: + labelSelector: type: object properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - namespaces: + matchExpressions: type: array items: - type: string - topologyKey: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + matchLabels: + x-kubernetes-preserve-unknown-fields: true + type: object + matchLabelKeys: + type: array + items: + type: string + maxSkew: + type: integer + minDomains: + type: integer + nodeAffinityPolicy: + type: string + nodeTaintsPolicy: + type: string + topologyKey: + type: string + whenUnsatisfiable: + type: string + description: The pod's topology spread constraints. + description: Template for Kafka Exporter `Pods`. + service: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + description: Template for Kafka Exporter `Service`. + container: + type: object + properties: + env: + type: array + items: + type: object + properties: + name: + type: string + description: The environment variable key. + value: + type: string + description: The environment variable value. 
+ description: >- + Environment variables which should be applied to + the container. + securityContext: + type: object + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + type: object + properties: + add: + type: array + items: type: string - description: The pod's affinity rules. - tolerations: - type: array - items: + drop: + type: array + items: + type: string + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + type: integer + runAsNonRoot: + type: boolean + runAsUser: + type: integer + seLinuxOptions: + type: object + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + seccompProfile: + type: object + properties: + localhostProfile: + type: string + type: + type: string + windowsOptions: + type: object + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + description: Security context for the container. + description: Template for the Kafka Exporter container. + serviceAccount: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. + Can be applied to different resources such + as `StatefulSets`, `Deployments`, `Pods`, + and `Services`. + description: Metadata applied to the resource. + description: Template for the Kafka Exporter service account. + description: Customization of deployment templates and pods. 
+ livenessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive failures for the probe to be + considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: >- + The initial delay before first the health is first + checked. Default to 15 seconds. Minimum value is 0. + periodSeconds: + type: integer + minimum: 1 + description: >- + How often (in seconds) to perform the probe. Default + to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive successes for the probe to be + considered successful after having failed. Defaults + to 1. Must be 1 for liveness. Minimum value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: >- + The timeout for each attempted health check. Default + to 5 seconds. Minimum value is 1. + description: Pod liveness check. + readinessProbe: + type: object + properties: + failureThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive failures for the probe to be + considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + initialDelaySeconds: + type: integer + minimum: 0 + description: >- + The initial delay before first the health is first + checked. Default to 15 seconds. Minimum value is 0. + periodSeconds: + type: integer + minimum: 1 + description: >- + How often (in seconds) to perform the probe. Default + to 10 seconds. Minimum value is 1. + successThreshold: + type: integer + minimum: 1 + description: >- + Minimum consecutive successes for the probe to be + considered successful after having failed. Defaults + to 1. Must be 1 for liveness. Minimum value is 1. + timeoutSeconds: + type: integer + minimum: 1 + description: >- + The timeout for each attempted health check. Default + to 5 seconds. Minimum value is 1. + description: Pod readiness check. 
+ description: >- + Configuration of the Kafka Exporter. Kafka Exporter can + provide additional metrics, for example lag of consumer + group at topic/partition. + maintenanceTimeWindows: + type: array + items: + type: string + description: >- + A list of time windows for maintenance tasks (that is, + certificates renewal). Each time window is defined by a cron + expression. + required: + - kafka + - zookeeper + description: >- + The specification of the Kafka and ZooKeeper clusters, and Topic + Operator. + status: + type: object + properties: + conditions: + type: array + items: + type: object + properties: + type: + type: string + description: >- + The unique identifier of a condition, used to + distinguish between other conditions in the resource. + status: + type: string + description: >- + The status of the condition, either True, False or + Unknown. + lastTransitionTime: + type: string + description: >- + Last time the condition of a type changed from one + status to another. The required format is + 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. + reason: + type: string + description: >- + The reason for the condition's last transition (a + single word in CamelCase). + message: + type: string + description: >- + Human-readable message indicating details about the + condition's last transition. + description: List of status conditions. + observedGeneration: + type: integer + description: >- + The generation of the CRD that was last reconciled by the + operator. + listeners: + type: array + items: + type: object + properties: + type: + type: string + description: >- + *The `type` property has been deprecated, and should + now be configured using `name`.* The name of the + listener. + name: + type: string + description: The name of the listener. + addresses: + type: array + items: + type: object + properties: + host: + type: string + description: >- + The DNS name or IP address of the Kafka + bootstrap service. 
+ port: + type: integer + description: The port of the Kafka bootstrap service. + description: A list of the addresses for this listener. + bootstrapServers: + type: string + description: >- + A comma-separated list of `host:port` pairs for + connecting to the Kafka cluster using this listener. + certificates: + type: array + items: + type: string + description: >- + A list of TLS certificates which can be used to verify + the identity of the server when connecting to the + given listener. Set only for `tls` and `external` + listeners. + description: Addresses of the internal and external listeners. + clusterId: + type: string + description: Kafka cluster Id. + description: >- + The status of the Kafka and ZooKeeper clusters, and Topic + Operator. + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: kafkarebalances.kafka.strimzi.io + labels: + app: strimzi + strimzi.io/crd-install: 'true' +spec: + group: kafka.strimzi.io + names: + kind: KafkaRebalance + listKind: KafkaRebalanceList + singular: kafkarebalance + plural: kafkarebalances + shortNames: + - kr + categories: + - strimzi + scope: Namespaced + conversion: + strategy: None + versions: + - name: v1beta2 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - name: Cluster + description: The name of the Kafka cluster this resource rebalances + jsonPath: .metadata.labels.strimzi\.io/cluster + type: string + - name: PendingProposal + description: A proposal has been requested from Cruise Control + jsonPath: '.status.conditions[?(@.type=="PendingProposal")].status' + type: string + - name: ProposalReady + description: A proposal is ready and waiting for approval + jsonPath: '.status.conditions[?(@.type=="ProposalReady")].status' + type: string + - name: Rebalancing + description: Cruise Control is doing the rebalance + jsonPath: '.status.conditions[?(@.type=="Rebalancing")].status' + type: string + - name: Ready + description: The 
 rebalance is complete + jsonPath: '.status.conditions[?(@.type=="Ready")].status' + type: string + - name: NotReady + description: There is an error on the custom resource + jsonPath: '.status.conditions[?(@.type=="NotReady")].status' + type: string + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + mode: + type: string + enum: + - full + - add-brokers + - remove-brokers + description: > + Mode to run the rebalancing. The supported modes are `full`, + `add-brokers`, `remove-brokers`. + + If not specified, the `full` mode is used by default. + + + * `full` mode runs the rebalancing across all the brokers in + the cluster. + + * `add-brokers` mode can be used after scaling up the + cluster to move some replicas to the newly added brokers. + + * `remove-brokers` mode can be used before scaling down the + cluster to move replicas out of the brokers to be removed. + brokers: + type: array + items: + type: integer + description: >- + The list of newly added brokers in case of scaling up or the + ones to be removed in case of scaling down to use for + rebalancing. This list can be used only with rebalancing + mode `add-brokers` and `remove-brokers`. It is ignored with + `full` mode. + goals: + type: array + items: + type: string + description: >- + A list of goals, ordered by decreasing priority, to use for + generating and executing the rebalance proposal. The + supported goals are available at + https://github.com/linkedin/cruise-control#goals. If an + empty goals list is provided, the goals declared in the + default.goals Cruise Control configuration parameter are + used. + skipHardGoalCheck: + type: boolean + description: >- + Whether to allow the hard goals specified in the Kafka CR to + be skipped in optimization proposal generation. This can be + useful when some of those hard goals are preventing a + balance solution being found. Default is false. 
+ rebalanceDisk: + type: boolean + description: >- + Enables intra-broker disk balancing, which balances disk + space utilization between disks on the same broker. Only + applies to Kafka deployments that use JBOD storage with + multiple disks. When enabled, inter-broker balancing is + disabled. Default is false. + excludedTopics: + type: string + description: >- + A regular expression where any matching topics will be + excluded from the calculation of optimization proposals. + This expression will be parsed by the + java.util.regex.Pattern class; for more information on the + supported format consult the documentation for that class. + concurrentPartitionMovementsPerBroker: + type: integer + minimum: 0 + description: >- + The upper bound of ongoing partition replica movements going + into/out of each broker. Default is 5. + concurrentIntraBrokerPartitionMovements: + type: integer + minimum: 0 + description: >- + The upper bound of ongoing partition replica movements + between disks within each broker. Default is 2. + concurrentLeaderMovements: + type: integer + minimum: 0 + description: >- + The upper bound of ongoing partition leadership movements. + Default is 1000. + replicationThrottle: + type: integer + minimum: 0 + description: >- + The upper bound, in bytes per second, on the bandwidth used + to move replicas. There is no limit by default. + replicaMovementStrategies: + type: array + items: + type: string + description: >- + A list of strategy class names used to determine the + execution order for the replica movements in the generated + optimization proposal. By default + BaseReplicaMovementStrategy is used, which will execute the + replica movements in the order that they were generated. + description: The specification of the Kafka rebalance. 
+ status: + type: object + properties: + conditions: + type: array + items: + type: object + properties: + type: + type: string + description: >- + The unique identifier of a condition, used to + distinguish between other conditions in the resource. + status: + type: string + description: >- + The status of the condition, either True, False or + Unknown. + lastTransitionTime: + type: string + description: >- + Last time the condition of a type changed from one + status to another. The required format is + 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. + reason: + type: string + description: >- + The reason for the condition's last transition (a + single word in CamelCase). + message: + type: string + description: >- + Human-readable message indicating details about the + condition's last transition. + description: List of status conditions. + observedGeneration: + type: integer + description: >- + The generation of the CRD that was last reconciled by the + operator. + sessionId: + type: string + description: >- + The session identifier for requests to Cruise Control + pertaining to this KafkaRebalance resource. This is used by + the Kafka Rebalance operator to track the status of ongoing + rebalancing operations. + optimizationResult: + x-kubernetes-preserve-unknown-fields: true + type: object + description: A JSON object describing the optimization result. + description: The status of the Kafka rebalance. 
+ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: kafkatopics.kafka.strimzi.io + labels: + app: strimzi + strimzi.io/crd-install: 'true' +spec: + group: kafka.strimzi.io + names: + kind: KafkaTopic + listKind: KafkaTopicList + singular: kafkatopic + plural: kafkatopics + shortNames: + - kt + categories: + - strimzi + scope: Namespaced + conversion: + strategy: None + versions: + - name: v1beta2 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - name: Cluster + description: The name of the Kafka cluster this topic belongs to + jsonPath: .metadata.labels.strimzi\.io/cluster + type: string + - name: Partitions + description: The desired number of partitions in the topic + jsonPath: .spec.partitions + type: integer + - name: Replication factor + description: The desired number of replicas of each partition + jsonPath: .spec.replicas + type: integer + - name: Ready + description: The state of the custom resource + jsonPath: '.status.conditions[?(@.type=="Ready")].status' + type: string + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + partitions: + type: integer + minimum: 1 + description: >- + The number of partitions the topic should have. This cannot + be decreased after topic creation. It can be increased after + topic creation, but it is important to understand the + consequences that has, especially for topics with semantic + partitioning. When absent this will default to the broker + configuration for `num.partitions`. + replicas: + type: integer + minimum: 1 + maximum: 32767 + description: >- + The number of replicas the topic should have. When absent + this will default to the broker configuration for + `default.replication.factor`. + config: + x-kubernetes-preserve-unknown-fields: true + type: object + description: The topic configuration. + topicName: + type: string + description: >- + The name of the topic. 
When absent this will default to the + metadata.name of the topic. It is recommended to not set + this unless the topic name is not a valid Kubernetes + resource name. + description: The specification of the topic. + status: + type: object + properties: + conditions: + type: array + items: + type: object + properties: + type: + type: string + description: >- + The unique identifier of a condition, used to + distinguish between other conditions in the resource. + status: + type: string + description: >- + The status of the condition, either True, False or + Unknown. + lastTransitionTime: + type: string + description: >- + Last time the condition of a type changed from one + status to another. The required format is + 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. + reason: + type: string + description: >- + The reason for the condition's last transition (a + single word in CamelCase). + message: + type: string + description: >- + Human-readable message indicating details about the + condition's last transition. + description: List of status conditions. + observedGeneration: + type: integer + description: >- + The generation of the CRD that was last reconciled by the + operator. + topicName: + type: string + description: Topic name. + description: The status of the topic. 
+ - name: v1beta1 + served: true + storage: false + subresources: + status: {} + additionalPrinterColumns: + - name: Cluster + description: The name of the Kafka cluster this topic belongs to + jsonPath: .metadata.labels.strimzi\.io/cluster + type: string + - name: Partitions + description: The desired number of partitions in the topic + jsonPath: .spec.partitions + type: integer + - name: Replication factor + description: The desired number of replicas of each partition + jsonPath: .spec.replicas + type: integer + - name: Ready + description: The state of the custom resource + jsonPath: '.status.conditions[?(@.type=="Ready")].status' + type: string + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + partitions: + type: integer + minimum: 1 + description: >- + The number of partitions the topic should have. This cannot + be decreased after topic creation. It can be increased after + topic creation, but it is important to understand the + consequences that has, especially for topics with semantic + partitioning. When absent this will default to the broker + configuration for `num.partitions`. + replicas: + type: integer + minimum: 1 + maximum: 32767 + description: >- + The number of replicas the topic should have. When absent + this will default to the broker configuration for + `default.replication.factor`. + config: + x-kubernetes-preserve-unknown-fields: true + type: object + description: The topic configuration. + topicName: + type: string + description: >- + The name of the topic. When absent this will default to the + metadata.name of the topic. It is recommended to not set + this unless the topic name is not a valid Kubernetes + resource name. + description: The specification of the topic. 
+ status: + type: object + properties: + conditions: + type: array + items: + type: object + properties: + type: + type: string + description: >- + The unique identifier of a condition, used to + distinguish between other conditions in the resource. + status: + type: string + description: >- + The status of the condition, either True, False or + Unknown. + lastTransitionTime: + type: string + description: >- + Last time the condition of a type changed from one + status to another. The required format is + 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. + reason: + type: string + description: >- + The reason for the condition's last transition (a + single word in CamelCase). + message: + type: string + description: >- + Human-readable message indicating details about the + condition's last transition. + description: List of status conditions. + observedGeneration: + type: integer + description: >- + The generation of the CRD that was last reconciled by the + operator. + topicName: + type: string + description: Topic name. + description: The status of the topic. + - name: v1alpha1 + served: true + storage: false + subresources: + status: {} + additionalPrinterColumns: + - name: Cluster + description: The name of the Kafka cluster this topic belongs to + jsonPath: .metadata.labels.strimzi\.io/cluster + type: string + - name: Partitions + description: The desired number of partitions in the topic + jsonPath: .spec.partitions + type: integer + - name: Replication factor + description: The desired number of replicas of each partition + jsonPath: .spec.replicas + type: integer + - name: Ready + description: The state of the custom resource + jsonPath: '.status.conditions[?(@.type=="Ready")].status' + type: string + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + partitions: + type: integer + minimum: 1 + description: >- + The number of partitions the topic should have. This cannot + be decreased after topic creation. 
It can be increased after + topic creation, but it is important to understand the + consequences that has, especially for topics with semantic + partitioning. When absent this will default to the broker + configuration for `num.partitions`. + replicas: + type: integer + minimum: 1 + maximum: 32767 + description: >- + The number of replicas the topic should have. When absent + this will default to the broker configuration for + `default.replication.factor`. + config: + x-kubernetes-preserve-unknown-fields: true + type: object + description: The topic configuration. + topicName: + type: string + description: >- + The name of the topic. When absent this will default to the + metadata.name of the topic. It is recommended to not set + this unless the topic name is not a valid Kubernetes + resource name. + description: The specification of the topic. + status: + type: object + properties: + conditions: + type: array + items: + type: object + properties: + type: + type: string + description: >- + The unique identifier of a condition, used to + distinguish between other conditions in the resource. + status: + type: string + description: >- + The status of the condition, either True, False or + Unknown. + lastTransitionTime: + type: string + description: >- + Last time the condition of a type changed from one + status to another. The required format is + 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. + reason: + type: string + description: >- + The reason for the condition's last transition (a + single word in CamelCase). + message: + type: string + description: >- + Human-readable message indicating details about the + condition's last transition. + description: List of status conditions. + observedGeneration: + type: integer + description: >- + The generation of the CRD that was last reconciled by the + operator. + topicName: + type: string + description: Topic name. + description: The status of the topic. 
+ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: strimzi-cluster-operator-kafka-broker-delegation + labels: + app: strimzi +subjects: + - kind: ServiceAccount + name: strimzi-cluster-operator + namespace: private +roleRef: + kind: ClusterRole + name: strimzi-kafka-broker + apiGroup: rbac.authorization.k8s.io + +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: strimzi-cluster-operator + labels: + app: strimzi + namespace: private +data: + log4j2.properties: > + name = COConfig + + monitorInterval = 30 + + + appender.console.type = Console + + appender.console.name = STDOUT + + appender.console.layout.type = PatternLayout + + appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - + %m%n + + + rootLogger.level = ${env:STRIMZI_LOG_LEVEL:-INFO} + + rootLogger.appenderRefs = stdout + + rootLogger.appenderRef.console.ref = STDOUT + + + # Kafka AdminClient logging is a bit noisy at INFO level + + logger.kafka.name = org.apache.kafka + + logger.kafka.level = FATAL + + + # Zookeeper is very verbose even on INFO level -> We set it to WARN by default + + logger.zookeepertrustmanager.name = org.apache.zookeeper + + logger.zookeepertrustmanager.level = FATAL + + + # Keeps separate level for Netty logging -> to not be changed by the root logger + + logger.netty.name = io.netty + + logger.netty.level = INFO + + + # Keeps separate log level for OkHttp client + + logger.okhttp3.name = okhttp3 + + logger.okhttp3.level = INFO + + log.segment.bytes=1073741824 + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: kafkausers.kafka.strimzi.io + labels: + app: strimzi + strimzi.io/crd-install: 'true' +spec: + group: kafka.strimzi.io + names: + kind: KafkaUser + listKind: KafkaUserList + singular: kafkauser + plural: kafkausers + shortNames: + - ku + categories: + - strimzi + scope: Namespaced + conversion: + strategy: None + versions: + - name: v1beta2 + served: true + storage: 
true + subresources: + status: {} + additionalPrinterColumns: + - name: Cluster + description: The name of the Kafka cluster this user belongs to + jsonPath: .metadata.labels.strimzi\.io/cluster + type: string + - name: Authentication + description: How the user is authenticated + jsonPath: .spec.authentication.type + type: string + - name: Authorization + description: How the user is authorised + jsonPath: .spec.authorization.type + type: string + - name: Ready + description: The state of the custom resource + jsonPath: '.status.conditions[?(@.type=="Ready")].status' + type: string + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + authentication: + type: object + properties: + password: + type: object + properties: + valueFrom: + type: object + properties: + secretKeyRef: + type: object + properties: + key: + type: string + name: + type: string + optional: + type: boolean + description: >- + Selects a key of a Secret in the resource's + namespace. + description: Secret from which the password should be read. + required: + - valueFrom + description: >- + Specify the password for the user. If not set, a new + password is generated by the User Operator. + type: + type: string + enum: + - tls + - tls-external + - scram-sha-512 + description: Authentication type. + required: + - type + description: >- + Authentication mechanism enabled for this Kafka user. The + supported authentication mechanisms are `scram-sha-512`, + `tls`, and `tls-external`. + + + * `scram-sha-512` generates a secret with SASL SCRAM-SHA-512 + credentials. + + * `tls` generates a secret with user certificate for mutual + TLS authentication. + + * `tls-external` does not generate a user certificate. But + prepares the user for using mutual TLS authentication using + a user certificate generated outside the User Operator. + ACLs and quotas set for this user are configured in the `CN=` format. + + Authentication is optional. 
If authentication is not + configured, no credentials are generated. ACLs and quotas + set for the user are configured in the `` format + suitable for SASL authentication. + authorization: + type: object + properties: + acls: + type: array + items: + type: object + properties: + host: + type: string + description: >- + The host from which the action described in the + ACL rule is allowed or denied. + operation: + type: string + enum: + - Read + - Write + - Create + - Delete + - Alter + - Describe + - ClusterAction + - AlterConfigs + - DescribeConfigs + - IdempotentWrite + - All + description: >- + Operation which will be allowed or denied. + Supported operations are: Read, Write, Create, + Delete, Alter, Describe, ClusterAction, + AlterConfigs, DescribeConfigs, IdempotentWrite and + All. + operations: + type: array + items: + type: string + enum: + - Read + - Write + - Create + - Delete + - Alter + - Describe + - ClusterAction + - AlterConfigs + - DescribeConfigs + - IdempotentWrite + - All + description: >- + List of operations which will be allowed or + denied. Supported operations are: Read, Write, + Create, Delete, Alter, Describe, ClusterAction, + AlterConfigs, DescribeConfigs, IdempotentWrite and + All. + resource: type: object properties: - effect: - type: string - key: + name: type: string - operator: + description: >- + Name of resource for which given ACL rule + applies. Can be combined with `patternType` + field to use prefix pattern. + patternType: type: string - tolerationSeconds: - type: integer - value: + enum: + - literal + - prefix + description: >- + Describes the pattern used in the resource + field. The supported types are `literal` and + `prefix`. With `literal` pattern type, the + resource field will be used as a definition of + a full name. With `prefix` pattern type, the + resource name will be used only as a prefix. + Default value is `literal`. + type: type: string - description: The pod's tolerations. 
- priorityClassName: - type: string - description: >- - The name of the priority class used to assign - priority to the pods. For more information about - priority classes, see {K8sPriorityClass}. - schedulerName: - type: string - description: >- - The name of the scheduler used to dispatch this - `Pod`. If not specified, the default scheduler will - be used. - hostAliases: - type: array - items: - type: object - properties: - hostnames: - type: array - items: + enum: + - topic + - group + - cluster + - transactionalId + description: >- + Resource type. The available resource types + are `topic`, `group`, `cluster`, and + `transactionalId`. + required: + - type + description: >- + Indicates the resource for which given ACL rule + applies. + type: + type: string + enum: + - allow + - deny + description: >- + The type of the rule. Currently the only supported + type is `allow`. ACL rules with type `allow` are + used to allow user to execute the specified + operations. Default value is `allow`. + required: + - resource + description: List of ACL rules which should be applied to this user. + type: + type: string + enum: + - simple + description: >- + Authorization type. Currently the only supported type is + `simple`. `simple` authorization type uses Kafka's + `kafka.security.authorizer.AclAuthorizer` class for + authorization. + required: + - acls + - type + description: Authorization rules for this Kafka user. + quotas: + type: object + properties: + consumerByteRate: + type: integer + minimum: 0 + description: >- + A quota on the maximum bytes per-second that each client + group can fetch from a broker before the clients in the + group are throttled. Defined on a per-broker basis. + controllerMutationRate: + type: number + minimum: 0 + description: >- + A quota on the rate at which mutations are accepted for + the create topics request, the create partitions request + and the delete topics request. 
+ The rate is accumulated + by the number of partitions created or deleted. + producerByteRate: + type: integer + minimum: 0 + description: >- + A quota on the maximum bytes per-second that each client + group can publish to a broker before the clients in the + group are throttled. Defined on a per-broker basis. + requestPercentage: + type: integer + minimum: 0 + description: >- + A quota on the maximum CPU utilization of each client + group as a percentage of network and I/O threads. + description: >- + Quotas on requests to control the broker resources used by + clients. Network bandwidth and request rate quotas can be + enforced. Kafka documentation for Kafka User quotas can be + found at + http://kafka.apache.org/documentation/#design_quotas. + template: + type: object + properties: + secret: + type: object + properties: + metadata: + type: object + properties: + labels: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Labels added to the resource template. Can be + applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true + type: object + description: >- + Annotations added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + description: Metadata applied to the resource. + description: >- + Template for KafkaUser resources. The template allows + users to specify how the `Secret` with password or TLS + certificates is generated. + description: Template to specify how Kafka User `Secrets` are generated. + description: The specification of the user. + status: + type: object + properties: + conditions: + type: array + items: + type: object + properties: + type: + type: string + description: >- + The unique identifier of a condition, used to + distinguish between other conditions in the resource. 
+ status: + type: string + description: >- + The status of the condition, either True, False or + Unknown. + lastTransitionTime: + type: string + description: >- + Last time the condition of a type changed from one + status to another. The required format is + 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. + reason: + type: string + description: >- + The reason for the condition's last transition (a + single word in CamelCase). + message: + type: string + description: >- + Human-readable message indicating details about the + condition's last transition. + description: List of status conditions. + observedGeneration: + type: integer + description: >- + The generation of the CRD that was last reconciled by the + operator. + username: + type: string + description: Username. + secret: + type: string + description: The name of `Secret` where the credentials are stored. + description: The status of the Kafka User. + - name: v1beta1 + served: true + storage: false + subresources: + status: {} + additionalPrinterColumns: + - name: Cluster + description: The name of the Kafka cluster this user belongs to + jsonPath: .metadata.labels.strimzi\.io/cluster + type: string + - name: Authentication + description: How the user is authenticated + jsonPath: .spec.authentication.type + type: string + - name: Authorization + description: How the user is authorised + jsonPath: .spec.authorization.type + type: string + - name: Ready + description: The state of the custom resource + jsonPath: '.status.conditions[?(@.type=="Ready")].status' + type: string + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + authentication: + type: object + properties: + password: + type: object + properties: + valueFrom: + type: object + properties: + secretKeyRef: + type: object + properties: + key: type: string - ip: - type: string - description: >- - The pod's HostAliases. 
HostAliases is an optional - list of hosts and IPs that will be injected into the - Pod's hosts file if specified. - enableServiceLinks: - type: boolean - description: >- - Indicates whether information about services should - be injected into Pod's environment variables. - topologySpreadConstraints: - type: array - items: + name: + type: string + optional: + type: boolean + description: >- + Selects a key of a Secret in the resource's + namespace. + description: Secret from which the password should be read. + required: + - valueFrom + description: >- + Specify the password for the user. If not set, a new + password is generated by the User Operator. + type: + type: string + enum: + - tls + - tls-external + - scram-sha-512 + description: Authentication type. + required: + - type + description: >- + Authentication mechanism enabled for this Kafka user. The + supported authentication mechanisms are `scram-sha-512`, + `tls`, and `tls-external`. + + + * `scram-sha-512` generates a secret with SASL SCRAM-SHA-512 + credentials. + + * `tls` generates a secret with user certificate for mutual + TLS authentication. + + * `tls-external` does not generate a user certificate. But + prepares the user for using mutual TLS authentication using + a user certificate generated outside the User Operator. + ACLs and quotas set for this user are configured in the `CN=` format. + + Authentication is optional. If authentication is not + configured, no credentials are generated. ACLs and quotas + set for the user are configured in the `` format + suitable for SASL authentication. + authorization: + type: object + properties: + acls: + type: array + items: + type: object + properties: + host: + type: string + description: >- + The host from which the action described in the + ACL rule is allowed or denied. 
+ operation: + type: string + enum: + - Read + - Write + - Create + - Delete + - Alter + - Describe + - ClusterAction + - AlterConfigs + - DescribeConfigs + - IdempotentWrite + - All + description: >- + Operation which will be allowed or denied. + Supported operations are: Read, Write, Create, + Delete, Alter, Describe, ClusterAction, + AlterConfigs, DescribeConfigs, IdempotentWrite and + All. + operations: + type: array + items: + type: string + enum: + - Read + - Write + - Create + - Delete + - Alter + - Describe + - ClusterAction + - AlterConfigs + - DescribeConfigs + - IdempotentWrite + - All + description: >- + List of operations which will be allowed or + denied. Supported operations are: Read, Write, + Create, Delete, Alter, Describe, ClusterAction, + AlterConfigs, DescribeConfigs, IdempotentWrite and + All. + resource: type: object properties: - labelSelector: - type: object - properties: - matchExpressions: - type: array - items: - type: object - properties: - key: - type: string - operator: - type: string - values: - type: array - items: - type: string - matchLabels: - x-kubernetes-preserve-unknown-fields: true - type: object - maxSkew: - type: integer - topologyKey: + name: type: string - whenUnsatisfiable: + description: >- + Name of resource for which given ACL rule + applies. Can be combined with `patternType` + field to use prefix pattern. + patternType: type: string - description: The pod's topology spread constraints. - description: Template for Kafka MirrorMaker `Pods`. - mirrorMakerContainer: + enum: + - literal + - prefix + description: >- + Describes the pattern used in the resource + field. The supported types are `literal` and + `prefix`. With `literal` pattern type, the + resource field will be used as a definition of + a full name. With `prefix` pattern type, the + resource name will be used only as a prefix. + Default value is `literal`. 
+ type: + type: string + enum: + - topic + - group + - cluster + - transactionalId + description: >- + Resource type. The available resource types + are `topic`, `group`, `cluster`, and + `transactionalId`. + required: + - type + description: >- + Indicates the resource for which given ACL rule + applies. + type: + type: string + enum: + - allow + - deny + description: >- + The type of the rule. Currently the only supported + type is `allow`. ACL rules with type `allow` are + used to allow user to execute the specified + operations. Default value is `allow`. + required: + - resource + description: List of ACL rules which should be applied to this user. + type: + type: string + enum: + - simple + description: >- + Authorization type. Currently the only supported type is + `simple`. `simple` authorization type uses Kafka's + `kafka.security.authorizer.AclAuthorizer` class for + authorization. + required: + - acls + - type + description: Authorization rules for this Kafka user. + quotas: + type: object + properties: + consumerByteRate: + type: integer + minimum: 0 + description: >- + A quota on the maximum bytes per-second that each client + group can fetch from a broker before the clients in the + group are throttled. Defined on a per-broker basis. + controllerMutationRate: + type: number + minimum: 0 + description: >- + A quota on the rate at which mutations are accepted for + the create topics request, the create partitions request + and the delete topics request. The rate is accumulated + by the number of partitions created or deleted. + producerByteRate: + type: integer + minimum: 0 + description: >- + A quota on the maximum bytes per-second that each client + group can publish to a broker before the clients in the + group are throttled. Defined on a per-broker basis. + requestPercentage: + type: integer + minimum: 0 + description: >- + A quota on the maximum CPU utilization of each client + group as a percentage of network and I/O threads. 
+ description: >- + Quotas on requests to control the broker resources used by + clients. Network bandwidth and request rate quotas can be + enforced.Kafka documentation for Kafka User quotas can be + found at + http://kafka.apache.org/documentation/#design_quotas. + template: + type: object + properties: + secret: type: object properties: - env: - type: array - items: - type: object - properties: - name: - type: string - description: The environment variable key. - value: - type: string - description: The environment variable value. - description: >- - Environment variables which should be applied to the - container. - securityContext: + metadata: type: object properties: - allowPrivilegeEscalation: - type: boolean - capabilities: - type: object - properties: - add: - type: array - items: - type: string - drop: - type: array - items: - type: string - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - type: integer - runAsNonRoot: - type: boolean - runAsUser: - type: integer - seLinuxOptions: + labels: + x-kubernetes-preserve-unknown-fields: true type: object - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - seccompProfile: + description: >- + Labels added to the resource template. Can be + applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + annotations: + x-kubernetes-preserve-unknown-fields: true type: object - properties: - localhostProfile: - type: string - type: - type: string - windowsOptions: + description: >- + Annotations added to the resource template. Can + be applied to different resources such as + `StatefulSets`, `Deployments`, `Pods`, and + `Services`. + description: Metadata applied to the resource. + description: >- + Template for KafkaUser resources. The template allows + users to specify how the `Secret` with password or TLS + certificates is generated. 
+ description: Template to specify how Kafka User `Secrets` are generated. + description: The specification of the user. + status: + type: object + properties: + conditions: + type: array + items: + type: object + properties: + type: + type: string + description: >- + The unique identifier of a condition, used to + distinguish between other conditions in the resource. + status: + type: string + description: >- + The status of the condition, either True, False or + Unknown. + lastTransitionTime: + type: string + description: >- + Last time the condition of a type changed from one + status to another. The required format is + 'yyyy-MM-ddTHH:mm:ssZ', in the UTC time zone. + reason: + type: string + description: >- + The reason for the condition's last transition (a + single word in CamelCase). + message: + type: string + description: >- + Human-readable message indicating details about the + condition's last transition. + description: List of status conditions. + observedGeneration: + type: integer + description: >- + The generation of the CRD that was last reconciled by the + operator. + username: + type: string + description: Username. + secret: + type: string + description: The name of `Secret` where the credentials are stored. + description: The status of the Kafka User. 
+ - name: v1alpha1 + served: true + storage: false + subresources: + status: {} + additionalPrinterColumns: + - name: Cluster + description: The name of the Kafka cluster this user belongs to + jsonPath: .metadata.labels.strimzi\.io/cluster + type: string + - name: Authentication + description: How the user is authenticated + jsonPath: .spec.authentication.type + type: string + - name: Authorization + description: How the user is authorised + jsonPath: .spec.authorization.type + type: string + - name: Ready + description: The state of the custom resource + jsonPath: '.status.conditions[?(@.type=="Ready")].status' + type: string + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + authentication: + type: object + properties: + password: + type: object + properties: + valueFrom: + type: object + properties: + secretKeyRef: type: object properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: + key: type: string - runAsUserName: + name: type: string - description: Security context for the container. - description: Template for Kafka MirrorMaker container. - podDisruptionBudget: + optional: + type: boolean + description: >- + Selects a key of a Secret in the resource's + namespace. + description: Secret from which the password should be read. + required: + - valueFrom + description: >- + Specify the password for the user. If not set, a new + password is generated by the User Operator. + type: + type: string + enum: + - tls + - tls-external + - scram-sha-512 + description: Authentication type. + required: + - type + description: >- + Authentication mechanism enabled for this Kafka user. The + supported authentication mechanisms are `scram-sha-512`, + `tls`, and `tls-external`. + + + * `scram-sha-512` generates a secret with SASL SCRAM-SHA-512 + credentials. + + * `tls` generates a secret with user certificate for mutual + TLS authentication. + + * `tls-external` does not generate a user certificate. 
But + prepares the user for using mutual TLS authentication using + a user certificate generated outside the User Operator. + ACLs and quotas set for this user are configured in the `CN=` format. + + Authentication is optional. If authentication is not + configured, no credentials are generated. ACLs and quotas + set for the user are configured in the `` format + suitable for SASL authentication. + authorization: + type: object + properties: + acls: + type: array + items: + type: object + properties: + host: + type: string + description: >- + The host from which the action described in the + ACL rule is allowed or denied. + operation: + type: string + enum: + - Read + - Write + - Create + - Delete + - Alter + - Describe + - ClusterAction + - AlterConfigs + - DescribeConfigs + - IdempotentWrite + - All + description: >- + Operation which will be allowed or denied. + Supported operations are: Read, Write, Create, + Delete, Alter, Describe, ClusterAction, + AlterConfigs, DescribeConfigs, IdempotentWrite and + All. + operations: + type: array + items: + type: string + enum: + - Read + - Write + - Create + - Delete + - Alter + - Describe + - ClusterAction + - AlterConfigs + - DescribeConfigs + - IdempotentWrite + - All + description: >- + List of operations which will be allowed or + denied. Supported operations are: Read, Write, + Create, Delete, Alter, Describe, ClusterAction, + AlterConfigs, DescribeConfigs, IdempotentWrite and + All. + resource: + type: object + properties: + name: + type: string + description: >- + Name of resource for which given ACL rule + applies. Can be combined with `patternType` + field to use prefix pattern. + patternType: + type: string + enum: + - literal + - prefix + description: >- + Describes the pattern used in the resource + field. The supported types are `literal` and + `prefix`. With `literal` pattern type, the + resource field will be used as a definition of + a full name. 
With `prefix` pattern type, the + resource name will be used only as a prefix. + Default value is `literal`. + type: + type: string + enum: + - topic + - group + - cluster + - transactionalId + description: >- + Resource type. The available resource types + are `topic`, `group`, `cluster`, and + `transactionalId`. + required: + - type + description: >- + Indicates the resource for which given ACL rule + applies. + type: + type: string + enum: + - allow + - deny + description: >- + The type of the rule. Currently the only supported + type is `allow`. ACL rules with type `allow` are + used to allow user to execute the specified + operations. Default value is `allow`. + required: + - resource + description: List of ACL rules which should be applied to this user. + type: + type: string + enum: + - simple + description: >- + Authorization type. Currently the only supported type is + `simple`. `simple` authorization type uses Kafka's + `kafka.security.authorizer.AclAuthorizer` class for + authorization. + required: + - acls + - type + description: Authorization rules for this Kafka user. + quotas: + type: object + properties: + consumerByteRate: + type: integer + minimum: 0 + description: >- + A quota on the maximum bytes per-second that each client + group can fetch from a broker before the clients in the + group are throttled. Defined on a per-broker basis. + controllerMutationRate: + type: number + minimum: 0 + description: >- + A quota on the rate at which mutations are accepted for + the create topics request, the create partitions request + and the delete topics request. The rate is accumulated + by the number of partitions created or deleted. + producerByteRate: + type: integer + minimum: 0 + description: >- + A quota on the maximum bytes per-second that each client + group can publish to a broker before the clients in the + group are throttled. Defined on a per-broker basis. 
+ requestPercentage: + type: integer + minimum: 0 + description: >- + A quota on the maximum CPU utilization of each client + group as a percentage of network and I/O threads. + description: >- + Quotas on requests to control the broker resources used by + clients. Network bandwidth and request rate quotas can be + enforced.Kafka documentation for Kafka User quotas can be + found at + http://kafka.apache.org/documentation/#design_quotas. + template: + type: object + properties: + secret: type: object properties: metadata: @@ -17339,101 +18541,13 @@ spec: be applied to different resources such as `StatefulSets`, `Deployments`, `Pods`, and `Services`. - description: >- - Metadata to apply to the - `PodDistruptionBugetTemplate` resource. - maxUnavailable: - type: integer - minimum: 0 - description: >- - Maximum number of unavailable pods to allow - automatic Pod eviction. A Pod eviction is allowed - when the `maxUnavailable` number of pods or fewer - are unavailable after the eviction. Setting this - value to 0 prevents all voluntary evictions, so the - pods must be evicted manually. Defaults to 1. - description: Template for Kafka MirrorMaker `PodDisruptionBudget`. - description: >- - Template to specify how Kafka MirrorMaker resources, - `Deployments` and `Pods`, are generated. - livenessProbe: - type: object - properties: - failureThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive failures for the probe to be - considered failed after having succeeded. Defaults to 3. - Minimum value is 1. - initialDelaySeconds: - type: integer - minimum: 0 - description: >- - The initial delay before first the health is first - checked. Default to 15 seconds. Minimum value is 0. - periodSeconds: - type: integer - minimum: 1 - description: >- - How often (in seconds) to perform the probe. Default to - 10 seconds. Minimum value is 1. 
- successThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive successes for the probe to be - considered successful after having failed. Defaults to - 1. Must be 1 for liveness. Minimum value is 1. - timeoutSeconds: - type: integer - minimum: 1 - description: >- - The timeout for each attempted health check. Default to - 5 seconds. Minimum value is 1. - description: Pod liveness checking. - readinessProbe: - type: object - properties: - failureThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive failures for the probe to be - considered failed after having succeeded. Defaults to 3. - Minimum value is 1. - initialDelaySeconds: - type: integer - minimum: 0 - description: >- - The initial delay before first the health is first - checked. Default to 15 seconds. Minimum value is 0. - periodSeconds: - type: integer - minimum: 1 - description: >- - How often (in seconds) to perform the probe. Default to - 10 seconds. Minimum value is 1. - successThreshold: - type: integer - minimum: 1 - description: >- - Minimum consecutive successes for the probe to be - considered successful after having failed. Defaults to - 1. Must be 1 for liveness. Minimum value is 1. - timeoutSeconds: - type: integer - minimum: 1 + description: Metadata applied to the resource. description: >- - The timeout for each attempted health check. Default to - 5 seconds. Minimum value is 1. - description: Pod readiness checking. - required: - - replicas - - consumer - - producer - - whitelist - description: The specification of Kafka MirrorMaker. + Template for KafkaUser resources. The template allows + users to specify how the `Secret` with password or TLS + certificates is generated. + description: Template to specify how Kafka User `Secrets` are generated. + description: The specification of the user. status: type: object properties: @@ -17474,74 +18588,10 @@ spec: description: >- The generation of the CRD that was last reconciled by the operator. 
- labelSelector: + username: type: string - description: Label selector for pods providing this resource. - replicas: - type: integer - description: >- - The current number of pods being used to provide this - resource. - description: The status of Kafka MirrorMaker. - ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: strimzi-cluster-operator - labels: - app: strimzi - namespace: private -data: - log4j2.properties: > - name = COConfig - - monitorInterval = 30 - - - appender.console.type = Console - - appender.console.name = STDOUT - - appender.console.layout.type = PatternLayout - - appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - - %m%n - - - rootLogger.level = ${env:STRIMZI_LOG_LEVEL:-INFO} - - rootLogger.appenderRefs = stdout - - rootLogger.appenderRef.console.ref = STDOUT - - rootLogger.additivity = false - - - # Kafka AdminClient logging is a bit noisy at INFO level - - logger.kafka.name = org.apache.kafka - - logger.kafka.level = WARN - - logger.kafka.additivity = false - - - # Zookeeper is very verbose even on INFO level -> We set it to WARN by - default - - logger.zookeepertrustmanager.name = org.apache.zookeeper - - logger.zookeepertrustmanager.level = WARN - - logger.zookeepertrustmanager.additivity = false - - - # Keeps separate level for Netty logging -> to not be changed by the root - logger - - logger.netty.name = io.netty - - logger.netty.level = INFO - - logger.netty.additivity = false + description: Username. + secret: + type: string + description: The name of `Secret` where the credentials are stored. + description: The status of the Kafka User. 
diff --git a/plano b/plano deleted file mode 120000 index 0f4ec84..0000000 --- a/plano +++ /dev/null @@ -1 +0,0 @@ -subrepos/skewer/plano \ No newline at end of file diff --git a/plano b/plano new file mode 100755 index 0000000..476427d --- /dev/null +++ b/plano @@ -0,0 +1,28 @@ +#!/usr/bin/python3 +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +import sys + +sys.path.insert(0, "python") + +from plano import PlanoCommand + +if __name__ == "__main__": + PlanoCommand().main() diff --git a/python/plano b/python/plano new file mode 120000 index 0000000..431570b --- /dev/null +++ b/python/plano @@ -0,0 +1 @@ +../external/skewer/python/plano \ No newline at end of file diff --git a/python/skewer b/python/skewer new file mode 120000 index 0000000..0cc66e2 --- /dev/null +++ b/python/skewer @@ -0,0 +1 @@ +../external/skewer/python/skewer \ No newline at end of file diff --git a/skewer.yaml b/skewer.yaml index 4d885f1..47546f1 100644 --- a/skewer.yaml +++ b/skewer.yaml @@ -1,6 +1,6 @@ title: Trade Zoo -subtitle: A simple trading application that runs in the public cloud but keeps its data in a private Kafka cluster -github_actions_url: https://github.com/skupperproject/skupper-example-trade-zoo/actions/workflows/main.yaml +subtitle: | + A simple trading application that runs in the public cloud but keeps its data in a private Kafka cluster overview: | This example is a simple Kafka application that shows how you can use Skupper to access a Kafka cluster at a remote site without @@ -33,27 +33,30 @@ overview: | [strimzi]: https://strimzi.io/ sites: public: - kubeconfig: ~/.kube/config-public + title: Public + platform: kubernetes namespace: public + env: + KUBECONFIG: ~/.kube/config-public private: - kubeconfig: ~/.kube/config-private + title: Private + platform: kubernetes namespace: private + env: + KUBECONFIG: ~/.kube/config-private steps: - - standard: configure_separate_console_sessions - - standard: access_your_clusters - - standard: set_up_your_namespaces - - standard: install_skupper_in_your_namespaces - - standard: check_the_status_of_your_namespaces - - standard: link_your_namespaces + - standard: install_the_skupper_command_line_tool + - standard: kubernetes/set_up_your_namespaces - title: Deploy the Kafka cluster preamble: | - In the private namespace, use the `kubectl create` and `kubectl - apply` 
commands with the listed YAML files to install the - operator and deploy the cluster and topic. + In Private, use the `kubectl create` and `kubectl apply` + commands with the listed YAML files to install the operator and + deploy the cluster and topic. commands: private: - run: kubectl create -f kafka-cluster/strimzi.yaml - run: kubectl apply -f kafka-cluster/cluster1.yaml + - await_resource: kafka/cluster1 - run: kubectl wait --for condition=ready --timeout 900s kafka/cluster1 postamble: | **Note:** @@ -87,32 +90,33 @@ steps: more information. [advertised-addresses]: https://strimzi.io/docs/operators/in-development/configuring.html#property-listener-config-broker-reference + - title: Deploy the application services + preamble: | + In Public, use the `kubectl apply` command with the listed YAML + files to install the application services. + commands: + public: + - run: kubectl apply -f order-processor/kubernetes.yaml + - run: kubectl apply -f market-data/kubernetes.yaml + - run: kubectl apply -f frontend/kubernetes.yaml + - standard: kubernetes/create_your_sites + - standard: kubernetes/link_your_sites - title: Expose the Kafka cluster preamble: | - In the private namespace, use `skupper expose` with the - `--headless` option to expose the Kafka cluster as a headless - service on the Skupper network. + In Private, use `skupper expose` with the `--headless` option to + expose the Kafka cluster as a headless service on the Skupper + network. - Then, in the public namespace, use `kubectl get service` to - check that the `cluster1-kafka-brokers` service appears after a - moment. + Then, in Public, use `kubectl get service` to check that the + `cluster1-kafka-brokers` service appears after a moment. 
commands: private: + - await_resource: statefulset/cluster1-kafka - run: skupper expose statefulset/cluster1-kafka --headless --port 9092 public: + - await_resource: service/cluster1-kafka-brokers - run: kubectl get service/cluster1-kafka-brokers - - title: Deploy the application services - preamble: | - In the public namespace, use the `kubectl apply` command with - the listed YAML files to install the application services. - commands: - public: - - await: service/cluster1-kafka-brokers - - run: kubectl apply -f order-processor/kubernetes.yaml - - run: kubectl apply -f market-data/kubernetes.yaml - - run: kubectl apply -f frontend/kubernetes.yaml - - standard: test_the_application - - standard: accessing_the_web_console + - standard: hello_world/access_the_frontend - standard: cleaning_up commands: private: diff --git a/subrepos/skewer/.github/workflows/main.yaml b/subrepos/skewer/.github/workflows/main.yaml deleted file mode 100644 index e32360c..0000000 --- a/subrepos/skewer/.github/workflows/main.yaml +++ /dev/null @@ -1,22 +0,0 @@ -name: main -on: - push: - pull_request: - schedule: - - cron: "0 0 * * 0" -jobs: - test: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - uses: actions/setup-python@v4 - with: - python-version: "3.x" - - uses: manusa/actions-setup-minikube@v2.6.0 - with: - minikube version: "v1.25.2" - kubernetes version: "v1.24.1" - github token: ${{ secrets.GITHUB_TOKEN }} - - run: curl -f https://skupper.io/install.sh | sh - - run: echo "$HOME/.local/bin" >> $GITHUB_PATH - - run: ./plano test diff --git a/subrepos/skewer/.gitrepo b/subrepos/skewer/.gitrepo deleted file mode 100644 index af6c17e..0000000 --- a/subrepos/skewer/.gitrepo +++ /dev/null @@ -1,12 +0,0 @@ -; DO NOT EDIT (unless you know what you are doing) -; -; This subdirectory is a git "subrepo", and this file is maintained by the -; git-subrepo command. 
See https://github.com/git-commands/git-subrepo#readme -; -[subrepo] - remote = git@github.com:skupperproject/skewer.git - branch = main - commit = 64703c553a102e5caa771258b9048167626b3672 - parent = fdefc96d24edb0ad1470419a79ab597f3c15b890 - method = merge - cmdver = 0.4.3 diff --git a/subrepos/skewer/.planofile b/subrepos/skewer/.planofile deleted file mode 100644 index cfedd59..0000000 --- a/subrepos/skewer/.planofile +++ /dev/null @@ -1,23 +0,0 @@ -from skewer import * - -@command -def generate(app): - """ - Generate README.md from the data in skewer.yaml - """ - with working_dir("test-example"): - generate_readme("skewer.yaml", "README.md") - print(read("README.md")) - -@command -def test(app): - with working_dir("test-example"): - generate_readme("skewer.yaml", "README.md") - check_file("README.md") - run_steps_on_minikube("skewer.yaml") - -@command -def render(app): - check_program("pandoc") - run(f"pandoc -o README.html README.md") - print(f"file:{get_real_path('README.html')}") diff --git a/subrepos/skewer/README.md b/subrepos/skewer/README.md deleted file mode 100644 index 3d096d3..0000000 --- a/subrepos/skewer/README.md +++ /dev/null @@ -1,261 +0,0 @@ -# Skewer - -[![main](https://github.com/skupperproject/skewer/actions/workflows/main.yaml/badge.svg)](https://github.com/skupperproject/skewer/actions/workflows/main.yaml) - -A library for documenting and testing Skupper examples - -A `skewer.yaml` file describes the steps and commands to achieve an -objective using Skupper. Skewer takes the `skewer.yaml` file as input -and produces two outputs: a `README.md` file and a test routine. 
- -## An example example - -[Example `skewer.yaml` file](test-example/skewer.yaml) - -[Example `README.md` output](test-example/README.md) - -## Setting up Skewer for your own example - -Make sure you have git-subrepo installed: - - dnf install git-subrepo - -Add the Skewer code as a subrepo in your example project: - - cd project-dir/ - git subrepo clone https://github.com/skupperproject/skewer subrepos/skewer - -Symlink the Skewer library into your `python` directory: - - mkdir -p python - ln -s ../subrepos/skewer/python/skewer.py python/skewer.py - -Symlink the `plano` command into the root of your project. Symlink -the standard `config/.planofile` as `.planofile` in the root as well: - - ln -s subrepos/skewer/plano - ln -s subrepos/skewer/config/.planofile - - - - - - - - - - -Use the `plano update-workflow` to copy the latest GitHub Actions -workflow file into your project: - - ./plano update-workflow - -Use your editor to create a `skewer.yaml` file in the root of your -project: - - emacs skewer.yaml - -Run the `./plano` command to see the available commands: - -~~~ console -$ ./plano -usage: plano [--verbose] [--quiet] [--debug] [-h] [-f FILE] {test,generate,render,run,run-external,demo} ... 
- -options: - --verbose Print detailed logging to the console - --quiet Print no logging to the console - --debug Print debugging output to the console - -h, --help Show this help message and exit - -f FILE, --file FILE Load commands from FILE (default 'Planofile' or '.planofile') - -commands: - {test,generate,render,run,run-external,demo} - test Test README generation and run the steps on Minikube - generate Generate README.md from the data in skewer.yaml - render Render README.html from the data in skewer.yaml - run Run the example steps using Minikube - run-external Run the example steps against external clusters - demo Run the example steps and pause before cleaning up -~~~ - -## Updating a Skewer subrepo inside your example project - -Use `git subrepo pull`: - - git subrepo pull --force subrepos/skewer - -Some older versions of git-subrepo won't complete a force pull. If -that happens, you can simply blow away your changes and get the latest -Skewer, using these commands: - - git subrepo clean subrepos/skewer - git rm -rf subrepos/skewer/ - git commit -am "Temporarily remove the previous version of Skewer" - git subrepo clone https://github.com/skupperproject/skewer subrepos/skewer - -## Skewer YAML - -The top level: - -~~~ yaml -title: # Your example's title (required) -subtitle: # Your chosen subtitle (required) -github_actions_url: # The URL of your workflow (optional) -overview: # Text introducing your example (optional) -prerequisites: # Text describing prerequisites (optional, has default text) -sites: # A map of named sites. See below. -steps: # A list of steps. See below. -summary: # Text to summarize what the user did (optional) -next_steps: # Text linking to more examples (optional, has default text) -~~~ - -A **site**: - -~~~ yaml -: - kubeconfig: # (required) - namespace: # (required) -~~~ - -A tilde (~) in the kubeconfig file path is replaced with a temporary -working directory during testing. 
- -Example sites: - -~~~ yaml -sites: - east: - kubeconfig: ~/.kube/config-east - namespace: east - west: - kubeconfig: ~/.kube/config-west - namespace: west -~~~ - -A **step**: - -~~~ yaml -- title: # The step title (required) - preamble: # Text before the commands (optional) - commands: # Named groups of commands. See below. - postamble: # Text after the commands (optional) -~~~ - -An example step: - -~~~ yaml -steps: - - title: Expose the frontend service - preamble: | - We have established connectivity between the two namespaces and - made the backend in `east` available to the frontend in `west`. - Before we can test the application, we need external access to - the frontend. - - Use `kubectl expose` with `--type LoadBalancer` to open network - access to the frontend service. Use `kubectl get services` to - check for the service and its external IP address. - commands: - east: - west: -~~~ - -Or you can use a named step from the library of standard steps: - -~~~ yaml -- standard: configure_separate_console_sessions -~~~ - -The standard steps are defined in -[python/skewer.yaml](python/skewer.yaml). - -You can override the `title`, `preamble`, `commands`, or `postamble` -field of a standard step by adding the field in addition to -`standard`: - -~~~ yaml -- standard: cleaning_up - commands: - east: - - run: skupper delete - - run: kubectl delete deployment/database - west: - - run: skupper delete -~~~ - -The initial steps are usually standard ones. There are also some -standard steps at the end. 
You may be able to use something like -this: - -~~~ yaml -steps: - - standard: configure_separate_console_sessions - - standard: access_your_clusters - - standard: set_up_your_namespaces - - standard: install_skupper_in_your_namespaces - - standard: check_the_status_of_your_namespaces - - standard: link_your_namespaces - - - standard: test_the_application - - standard: accessing_the_web_console - - standard: cleaning_up -~~~ - -Note that the `link_your_namespaces` and `test_the_application` steps -are less generic than the other steps, so check that the text and -commands they produce are doing what you need. If not, you'll need to -provide a custom step. - -The step commands are separated into named groups corresponding to the -sites. Each named group contains a list of command entries. Each -command entry has a `run` field containing a shell command and other -fields for awaiting completion or providing sample output. - -A **command**: - -~~~ yaml -- run: # A shell command (required) - apply: # Use this command only for "readme" or "test" (optional, default is both) - output: # Sample output to include in the README (optional) -~~~ - -Only the `run` and `output` fields are used in the README content. -The `output` field is used as sample output only, not for any kind of -testing. - -The `apply` field is useful when you want the readme instructions to -be different from the test procedure, or you simply want to omit -something. - -There is also a special `await` command you can use to pause for a -condition you require before going to the next step. It is used only -for testing and does not impact the README. 
- -~~~ yaml -- await: # A resource or list of resources for which to await readiness (optional) -~~~ - -Example commands: - -~~~ yaml -commands: - east: - - run: kubectl expose deployment/backend --port 8080 --type LoadBalancer - output: | - service/frontend exposed - west: - - await: service/backend - - run: kubectl get service/backend - output: | - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - backend ClusterIP 10.102.112.121 8080/TCP 30s -~~~ - -## Demo mode - -Skewer has a mode where it executes all the steps, but before cleaning -up and exiting, it pauses so you can inspect things. - -It is enabled by setting the environment variable `SKEWER_DEMO` to any -value when you call `./plano run` or one of its variants. You can -also use `./plano demo`, which sets the variable for you. diff --git a/subrepos/skewer/config/.github/workflows/main.yaml b/subrepos/skewer/config/.github/workflows/main.yaml deleted file mode 100644 index e32360c..0000000 --- a/subrepos/skewer/config/.github/workflows/main.yaml +++ /dev/null @@ -1,22 +0,0 @@ -name: main -on: - push: - pull_request: - schedule: - - cron: "0 0 * * 0" -jobs: - test: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - uses: actions/setup-python@v4 - with: - python-version: "3.x" - - uses: manusa/actions-setup-minikube@v2.6.0 - with: - minikube version: "v1.25.2" - kubernetes version: "v1.24.1" - github token: ${{ secrets.GITHUB_TOKEN }} - - run: curl -f https://skupper.io/install.sh | sh - - run: echo "$HOME/.local/bin" >> $GITHUB_PATH - - run: ./plano test diff --git a/subrepos/skewer/config/.planofile b/subrepos/skewer/config/.planofile deleted file mode 100644 index b3f1498..0000000 --- a/subrepos/skewer/config/.planofile +++ /dev/null @@ -1,62 +0,0 @@ -from skewer import * - -@command -def generate(app): - """ - Generate README.md from the data in skewer.yaml - """ - generate_readme("skewer.yaml", "README.md") - -@command -def render(app): - """ - Render README.html from the data in 
skewer.yaml - """ - check_program("pandoc") - - generate(app) - - run(f"pandoc -o README.html README.md") - - print(f"file:{get_real_path('README.html')}") - -@command -def run_(app): - """ - Run the example steps using Minikube - """ - run_steps_on_minikube("skewer.yaml") - -@command -def run_external(app, kubeconfig1, kubeconfig2): - """ - Run the example steps against external clusters - """ - run_steps_external("skewer.yaml", site1=kubeconfig1, site2=kubeconfig2) - -@command -def demo(app): - """ - Run the example steps and pause before cleaning up - """ - with working_env(SKEWER_DEMO=1): - run_steps_on_minikube("skewer.yaml") - -@command -def test(app): - """ - Test README generation and run the steps on Minikube - """ - generate_readme("skewer.yaml", make_temp_file()) - run_steps_on_minikube("skewer.yaml") - -@command -def update_workflow(app): - """ - Update the GitHub Actions workflow file - """ - - from_file = join("subrepos", "skewer", "config", ".github", "workflows", "main.yaml") - to_file = join(".github", "workflows", "main.yaml") - - copy(from_file, to_file) diff --git a/subrepos/skewer/plano b/subrepos/skewer/plano deleted file mode 120000 index 41ce1be..0000000 --- a/subrepos/skewer/plano +++ /dev/null @@ -1 +0,0 @@ -subrepos/plano/bin/plano \ No newline at end of file diff --git a/subrepos/skewer/python/plano.py b/subrepos/skewer/python/plano.py deleted file mode 120000 index 56374f0..0000000 --- a/subrepos/skewer/python/plano.py +++ /dev/null @@ -1 +0,0 @@ -../subrepos/plano/python/plano.py \ No newline at end of file diff --git a/subrepos/skewer/python/skewer.py b/subrepos/skewer/python/skewer.py deleted file mode 100644 index 02275ff..0000000 --- a/subrepos/skewer/python/skewer.py +++ /dev/null @@ -1,652 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -from plano import * - -_standard_steps_yaml = """ -configure_separate_console_sessions: - title: Configure separate console sessions - preamble: | - Skupper is designed for use with multiple namespaces, typically on - different clusters. The `skupper` command uses your - [kubeconfig][kubeconfig] and current context to select the - namespace where it operates. - - [kubeconfig]: https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/ - - Your kubeconfig is stored in a file in your home directory. The - `skupper` and `kubectl` commands use the `KUBECONFIG` environment - variable to locate it. - - A single kubeconfig supports only one active context per user. - Since you will be using multiple contexts at once in this - exercise, you need to create distinct kubeconfigs. - - Start a console session for each of your namespaces. Set the - `KUBECONFIG` environment variable to a different path in each - session. - commands: - "*": - - run: export KUBECONFIG=~/.kube/config-@namespace@ -access_your_clusters: - title: Access your clusters - preamble: | - The methods for accessing your clusters vary by Kubernetes - provider. Find the instructions for your chosen providers and use - them to authenticate and configure access for each console - session. 
See the following links for more information: - - * [Minikube](https://skupper.io/start/minikube.html) - * [Amazon Elastic Kubernetes Service (EKS)](https://skupper.io/start/eks.html) - * [Azure Kubernetes Service (AKS)](https://skupper.io/start/aks.html) - * [Google Kubernetes Engine (GKE)](https://skupper.io/start/gke.html) - * [IBM Kubernetes Service](https://skupper.io/start/ibmks.html) - * [OpenShift](https://skupper.io/start/openshift.html) - * [More providers](https://kubernetes.io/partners/#kcsp) -set_up_your_namespaces: - title: Set up your namespaces - preamble: | - Use `kubectl create namespace` to create the namespaces you wish - to use (or use existing namespaces). Use `kubectl config - set-context` to set the current namespace for each session. - commands: - "*": - - output: namespace/@namespace@ created - run: kubectl create namespace @namespace@ - - output: Context "minikube" modified. - run: kubectl config set-context --current --namespace @namespace@ -install_skupper_in_your_namespaces: - title: Install Skupper in your namespaces - preamble: | - The `skupper init` command installs the Skupper router and service - controller in the current namespace. Run the `skupper init` command - in each namespace. - - **Note:** If you are using Minikube, [you need to start `minikube - tunnel`][minikube-tunnel] before you install Skupper. - - [minikube-tunnel]: https://skupper.io/start/minikube.html#running-minikube-tunnel - commands: - "*": - - run: skupper init - output: | - Waiting for LoadBalancer IP or hostname... - Skupper is now installed in namespace '@namespace@'. Use 'skupper status' to get more information. -check_the_status_of_your_namespaces: - title: Check the status of your namespaces - preamble: | - Use `skupper status` in each console to check that Skupper is - installed. 
- commands: - "*": - - await: [deployment/skupper-service-controller, deployment/skupper-router] - - run: skupper status - output: | - Skupper is enabled for namespace "@namespace@" in interior mode. It is connected to 1 other site. It has 1 exposed service. - The site console url is: - The credentials for internal console-auth mode are held in secret: 'skupper-console-users' - postamble: | - As you move through the steps below, you can use `skupper status` at - any time to check your progress. -link_your_namespaces: - title: Link your namespaces - preamble: | - Creating a link requires use of two `skupper` commands in - conjunction, `skupper token create` and `skupper link create`. - - The `skupper token create` command generates a secret token that - signifies permission to create a link. The token also carries the - link details. Then, in a remote namespace, The `skupper link - create` command uses the token to create a link to the namespace - that generated it. - - **Note:** The link token is truly a *secret*. Anyone who has the - token can link to your namespace. Make sure that only those you - trust have access to it. - - First, use `skupper token create` in one namespace to generate the - token. Then, use `skupper link create` in the other to create a - link. - commands: - "0": - - output: Token written to ~/secret.token - run: skupper token create ~/secret.token - "1": - - run: skupper link create ~/secret.token - output: | - Site configured to link to https://10.105.193.154:8081/ed9c37f6-d78a-11ec-a8c7-04421a4c5042 (name=link1) - Check the status of the link using 'skupper link status'. - - run: skupper link status --wait 60 - apply: test - postamble: | - If your console sessions are on different machines, you may need - to use `sftp` or a similar tool to transfer the token securely. - By default, tokens expire after a single use or 15 minutes after - creation. 
-test_the_application: - title: Test the application - preamble: | - Now we're ready to try it out. Use `kubectl get service/frontend` - to look up the external IP of the frontend service. Then use - `curl` or a similar tool to request the `/api/health` endpoint at - that address. - - **Note:** The `` field in the following commands is a - placeholder. The actual value is an IP address. - commands: - "0": - - run: kubectl get service/frontend - apply: readme - output: | - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - frontend LoadBalancer 10.103.232.28 8080:30407/TCP 15s - - run: curl http://:8080/api/health - apply: readme - output: OK - - await_external_ip: service/frontend - - run: curl --fail --verbose --retry 60 --retry-connrefused --retry-delay 2 $(kubectl get service/frontend -o jsonpath='http://{.status.loadBalancer.ingress[0].ip}:8080/api/health'); echo - apply: test - postamble: | - If everything is in order, you can now access the web interface by - navigating to `http://:8080/` in your browser. -accessing_the_web_console: - title: Accessing the web console - numbered: false - preamble: | - Skupper includes a web console you can use to view the application - network. To access it, use `skupper status` to look up the URL of - the web console. Then use `kubectl get - secret/skupper-console-users` to look up the console admin - password. - - **Note:** The `` and `` fields in the - following output are placeholders. The actual values are specific - to your environment. - commands: - "0": - - run: skupper status - apply: readme - output: | - Skupper is enabled for namespace "@namespace@" in interior mode. It is connected to 1 other site. It has 1 exposed service. 
- The site console url is: - The credentials for internal console-auth mode are held in secret: 'skupper-console-users' - - run: kubectl get secret/skupper-console-users -o jsonpath={.data.admin} | base64 -d - apply: readme - output: - - await_external_ip: service/skupper - - run: curl --fail --insecure --verbose --retry 60 --retry-connrefused --retry-delay 2 $(kubectl get service/skupper -o jsonpath='https://{.status.loadBalancer.ingress[0].ip}:8080/') --user admin:$(kubectl get secret/skupper-console-users -o jsonpath={.data.admin} | base64 -d); echo - apply: test - postamble: | - Navigate to `` in your browser. When prompted, log - in as user `admin` and enter the password. -cleaning_up: - id: cleaning_up - title: Cleaning up - numbered: false - preamble: | - To remove Skupper and the other resources from this exercise, use - the following commands. - commands: - "*": - - run: skupper delete -""" - -_standard_steps = parse_yaml(_standard_steps_yaml) - -_example_suite_para = """ -This example is part of a [suite of examples][examples] showing the -different ways you can use [Skupper][website] to connect services -across cloud providers, data centers, and edge sites. - -[website]: https://skupper.io/ -[examples]: https://skupper.io/examples/index.html -""" - -_standard_prerequisites = """ -* The `kubectl` command-line tool, version 1.15 or later - ([installation guide][install-kubectl]) - -* The `skupper` command-line tool, the latest version ([installation - guide][install-skupper]) - -* Access to at least one Kubernetes cluster, from any provider you - choose - -[install-kubectl]: https://kubernetes.io/docs/tasks/tools/install-kubectl/ -[install-skupper]: https://skupper.io/install/index.html -""" - -_standard_next_steps = """ -Check out the other [examples][examples] on the Skupper website. 
-""" - -def check_environment(): - check_program("base64") - check_program("curl") - check_program("kubectl") - check_program("skupper") - -# Eventually Kubernetes will make this nicer: -# https://github.com/kubernetes/kubernetes/pull/87399 -# https://github.com/kubernetes/kubernetes/issues/80828 -# https://github.com/kubernetes/kubernetes/issues/83094 -def await_resource(group, name, namespace=None): - base_command = "kubectl" - - if namespace is not None: - base_command = f"{base_command} -n {namespace}" - - notice(f"Waiting for {group}/{name} to become available") - - for i in range(90): - sleep(2) - - if run(f"{base_command} get {group}/{name}", check=False).exit_code == 0: - break - else: - fail(f"Timed out waiting for {group}/{name}") - - if group == "deployment": - try: - run(f"{base_command} wait --for condition=available --timeout 180s {group}/{name}") - except: - run(f"{base_command} logs {group}/{name}") - raise - -def await_external_ip(group, name, namespace=None): - await_resource(group, name, namespace=namespace) - - base_command = "kubectl" - - if namespace is not None: - base_command = f"{base_command} -n {namespace}" - - for i in range(90): - sleep(2) - - if call(f"{base_command} get {group}/{name} -o jsonpath='{{.status.loadBalancer.ingress}}'") != "": - break - else: - fail(f"Timed out waiting for external IP for {group}/{name}") - - return call(f"{base_command} get {group}/{name} -o jsonpath='{{.status.loadBalancer.ingress[0].ip}}'") - -def run_steps_on_minikube(skewer_file): - check_environment() - check_program("minikube") - - skewer_data = read_yaml(skewer_file) - work_dir = make_temp_dir() - - _apply_standard_steps(skewer_data) - - try: - run(f"minikube -p skewer start") - - for name, value in skewer_data["sites"].items(): - kubeconfig = value["kubeconfig"].replace("~", work_dir) - - with working_env(KUBECONFIG=kubeconfig): - run(f"minikube -p skewer update-context") - check_file(ENV["KUBECONFIG"]) - - with 
open("/tmp/minikube-tunnel-output", "w") as tunnel_output_file: - with start(f"minikube -p skewer tunnel", output=tunnel_output_file): - _run_steps(work_dir, skewer_data) - finally: - run(f"minikube -p skewer delete") - -def run_steps_external(skewer_file, **kubeconfigs): - check_environment() - - skewer_data = read_yaml(skewer_file) - work_dir = make_temp_dir() - - _apply_standard_steps(skewer_data) - - for name, kubeconfig in kubeconfigs.items(): - skewer_data["sites"][name]["kubeconfig"] = kubeconfig - - _run_steps(work_dir, skewer_data) - -def _run_steps(work_dir, skewer_data): - steps = list() - cleaning_up_step = None - - for step in skewer_data["steps"]: - if step.get("id") == "cleaning_up": - cleaning_up_step = step - else: - steps.append(step) - - try: - for step in steps: - _run_step(work_dir, skewer_data, step) - - if "SKEWER_DEMO" in ENV: - _pause_for_demo(work_dir, skewer_data) - except: - print("TROUBLE!") - print("-- Start of debug output") - - for site_name, site_data in skewer_data["sites"].items(): - kubeconfig = site_data["kubeconfig"].replace("~", work_dir) - print(f"---- Debug output for site '{site_name}'") - - with working_env(KUBECONFIG=kubeconfig): - run("kubectl get services", check=False) - run("kubectl get deployments", check=False) - run("kubectl get statefulsets", check=False) - run("kubectl get pods", check=False) - run("skupper version", check=False) - run("skupper status", check=False) - run("skupper link status", check=False) - run("skupper service status", check=False) - run("skupper gateway status", check=False) - run("skupper network status", check=False) - run("skupper debug events", check=False) - run("kubectl logs deployment/skupper-router", check=False) - run("kubectl logs deployment/skupper-service-controller", check=False) - - print("-- End of debug output") - finally: - if cleaning_up_step is not None: - _run_step(work_dir, skewer_data, cleaning_up_step, check=False) - -def _pause_for_demo(work_dir, skewer_data): - 
first_site_name, first_site_data = list(skewer_data["sites"].items())[0] - first_site_kubeconfig = first_site_data["kubeconfig"].replace("~", work_dir) - frontend_url = None - - with working_env(KUBECONFIG=first_site_kubeconfig): - console_ip = await_external_ip("service", "skupper") - console_url = f"https://{console_ip}:8080/" - password_data = call("kubectl get secret skupper-console-users -o jsonpath='{.data.admin}'") - password = base64_decode(password_data).decode("ascii") - - if run("kubectl get service/frontend", check=False, output=DEVNULL).exit_code == 0: - frontend_ip = await_external_ip("service", "frontend") - frontend_url = f"http://{frontend_ip}:8080/" - - print() - print("Demo time!") - print() - print("Sites:") - - for site_name, site_data in skewer_data["sites"].items(): - kubeconfig = site_data["kubeconfig"].replace("~", work_dir) - print(f" {site_name}: export KUBECONFIG={kubeconfig}") - - if frontend_url: - print() - print(f"Frontend URL: {frontend_url}") - - print() - print(f"Console URL: {console_url}") - print( "Console user: admin") - print(f"Console password: {password}") - print() - - while input("Are you done (yes)? 
") != "yes": - pass - -def _run_step(work_dir, skewer_data, step_data, check=True): - if "commands" not in step_data: - return - - if "title" in step_data: - notice("Running step '{}'", step_data["title"]) - - try: - items = step_data["commands"].items() - except AttributeError: - items = list() - - for site_name in skewer_data["sites"]: - items.append((site_name, step_data["commands"])) - - for site_name, commands in items: - kubeconfig = skewer_data["sites"][site_name]["kubeconfig"].replace("~", work_dir) - - with working_env(KUBECONFIG=kubeconfig): - for command in commands: - if command.get("apply") == "readme": - continue - - if "run" in command: - run(command["run"].replace("~", work_dir), shell=True, check=check) - - if "await" in command: - resources = command["await"] - - if isinstance(resources, str): - resources = (resources,) - - for resource in resources: - group, name = resource.split("/", 1) - await_resource(group, name) - - if "await_external_ip" in command: - resources = command["await_external_ip"] - - if isinstance(resources, str): - resources = (resources,) - - for resource in resources: - group, name = resource.split("/", 1) - await_external_ip(group, name) - -def generate_readme(skewer_file, output_file): - skewer_data = read_yaml(skewer_file) - out = list() - - out.append(f"# {skewer_data['title']}") - out.append("") - - if "github_actions_url" in skewer_data: - url = skewer_data["github_actions_url"] - out.append(f"[![main]({url}/badge.svg)]({url})") - out.append("") - - if "subtitle" in skewer_data: - out.append(f"#### {skewer_data['subtitle']}") - out.append("") - - out.append(_example_suite_para) - out.append("") - out.append("#### Contents") - out.append("") - - if "overview" in skewer_data: - out.append("* [Overview](#overview)") - - out.append("* [Prerequisites](#prerequisites)") - - _apply_standard_steps(skewer_data) - - for i, step_data in enumerate(skewer_data["steps"], 1): - if step_data.get("numbered", True): - title = f"Step {i}: 
{step_data['title']}" - else: - title = step_data['title'] - - fragment = replace(title, " ", "_") - fragment = replace(fragment, r"[\W]", "") - fragment = replace(fragment, "_", "-") - fragment = fragment.lower() - - out.append(f"* [{title}](#{fragment})") - - if "summary" in skewer_data: - out.append("* [Summary](#summary)") - - if "next_steps" in skewer_data: - out.append("* [Next steps](#next-steps)") - - out.append("") - - if "overview" in skewer_data: - out.append("## Overview") - out.append("") - out.append(skewer_data["overview"].strip()) - out.append("") - - prerequisites = _standard_prerequisites - - if "prerequisites" in skewer_data: - prerequisites = skewer_data["prerequisites"].strip() - - out.append("## Prerequisites") - out.append("") - out.append(prerequisites) - out.append("") - - for i, step_data in enumerate(skewer_data["steps"], 1): - if step_data.get("numbered", True): - title = f"Step {i}: {step_data['title']}" - else: - title = step_data['title'] - - out.append(f"## {title}") - out.append("") - out.append(_generate_readme_step(skewer_data, step_data)) - out.append("") - - if "summary" in skewer_data: - out.append("## Summary") - out.append("") - out.append(skewer_data["summary"].strip()) - out.append("") - - next_steps = _standard_next_steps - - if "next_steps" in skewer_data: - next_steps = skewer_data["next_steps"].strip() - - out.append("## Next steps") - out.append("") - out.append(next_steps) - out.append("") - - write(output_file, "\n".join(out).strip() + "\n") - -def _generate_readme_step(skewer_data, step_data): - out = list() - - if "preamble" in step_data: - out.append(step_data["preamble"].strip()) - out.append("") - - if "commands" in step_data: - items = step_data["commands"].items() - - for i, item in enumerate(items): - site_name, commands = item - namespace = skewer_data["sites"][site_name]["namespace"] - outputs = list() - - out.append(f"_**Console for {namespace}:**_") - out.append("") - out.append("~~~ shell") - - for 
command in commands: - if command.get("apply") == "test": - continue - - if "run" in command: - out.append(command["run"]) - - if "output" in command: - assert "run" in command, command - - outputs.append((command["run"], command["output"])) - - out.append("~~~") - out.append("") - - if outputs: - out.append("_Sample output:_") - out.append("") - out.append("~~~ console") - out.append("\n\n".join((f"$ {run}\n{output.strip()}" for run, output in outputs))) - out.append("~~~") - out.append("") - - if "postamble" in step_data: - out.append(step_data["postamble"].strip()) - - return "\n".join(out).strip() - -def _apply_standard_steps(skewer_data): - for step_data in skewer_data["steps"]: - if "standard" not in step_data: - continue - - standard_step_data = _standard_steps[step_data["standard"]] - - if "id" not in step_data: - step_data["id"] = standard_step_data.get("id") - - if "title" not in step_data: - step_data["title"] = standard_step_data["title"] - - if "numbered" not in step_data: - step_data["numbered"] = standard_step_data.get("numbered", True) - - if "preamble" not in step_data: - if "preamble" in standard_step_data: - step_data["preamble"] = standard_step_data["preamble"] - - if "postamble" not in step_data: - if "postamble" in standard_step_data: - step_data["postamble"] = standard_step_data["postamble"] - - if "commands" not in step_data: - if "commands" in standard_step_data: - step_data["commands"] = dict() - - if "*" in standard_step_data["commands"]: - assert len(standard_step_data["commands"]) == 1, standard_step_data["commands"] - - for namespace, site_data in skewer_data["sites"].items(): - commands = standard_step_data["commands"]["*"] - - step_data["commands"][namespace] = _resolve_commands(commands, namespace) - else: - for site_index in standard_step_data["commands"]: - commands = standard_step_data["commands"][site_index] - namespace = list(skewer_data["sites"])[int(site_index)] - - step_data["commands"][namespace] = 
_resolve_commands(commands, namespace) - -def _resolve_commands(commands, namespace): - resolved_commands = list() - - for command in commands: - resolved_command = dict(command) - - if "run" in command: - resolved_command["run"] = command["run"].replace("@namespace@", namespace) - - if "output" in command: - resolved_command["output"] = command["output"].replace("@namespace@", namespace) - - resolved_commands.append(resolved_command) - - return resolved_commands diff --git a/subrepos/skewer/subrepos/plano/.github/workflows/main.yaml b/subrepos/skewer/subrepos/plano/.github/workflows/main.yaml deleted file mode 100644 index 789ed09..0000000 --- a/subrepos/skewer/subrepos/plano/.github/workflows/main.yaml +++ /dev/null @@ -1,12 +0,0 @@ -name: main -on: [push, pull_request] -jobs: - test: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 - - run: make test - - run: make install - - run: echo "$HOME/.local/bin" >> $GITHUB_PATH - - run: plano-self-test diff --git a/subrepos/skewer/subrepos/plano/.gitrepo b/subrepos/skewer/subrepos/plano/.gitrepo deleted file mode 100644 index da92bbd..0000000 --- a/subrepos/skewer/subrepos/plano/.gitrepo +++ /dev/null @@ -1,12 +0,0 @@ -; DO NOT EDIT (unless you know what you are doing) -; -; This subdirectory is a git "subrepo", and this file is maintained by the -; git-subrepo command. See https://github.com/git-commands/git-subrepo#readme -; -[subrepo] - remote = git@github.com:ssorj/plano.git - branch = main - commit = bf857cae8b01ded02049d4c1c441524e546a9283 - parent = db68ea37c04c006de2fd83c075201b94d9b81953 - method = merge - cmdver = 0.4.3 diff --git a/subrepos/skewer/subrepos/plano/Makefile b/subrepos/skewer/subrepos/plano/Makefile deleted file mode 100644 index 83ffddd..0000000 --- a/subrepos/skewer/subrepos/plano/Makefile +++ /dev/null @@ -1,114 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -.NOTPARALLEL: - -export PYTHONPATH := python:${PYTHONPATH} - -# A workaround for an install-with-prefix problem in Fedora 36 -# -# https://docs.fedoraproject.org/en-US/fedora/latest/release-notes/developers/Development_Python/#_pipsetup_py_installation_with_prefix -# https://bugzilla.redhat.com/show_bug.cgi?id=2026979 - -export RPM_BUILD_ROOT := fake - - -DESTDIR := / -PREFIX := ${HOME}/.local -DOCKER_COMMAND := podman - -.PHONY: default -default: build - -.PHONY: help -help: - @echo "build Build the code" - @echo "test Run the tests" - @echo "install Install the code" - @echo "clean Remove transient files from the checkout" - -.PHONY: build -build: - ./setup.py build - ./setup.py check - -.PHONY: install -install: clean - ./setup.py install --root ${DESTDIR} --prefix ${PREFIX} - -.PHONY: docs -docs: - mkdir -p build - sphinx-build -M html docs build/docs - -.PHONY: clean -clean: - find . -type f -name \*.pyc -delete - find . 
-type d -name __pycache__ -exec rm -rf \{} + - rm -rf build dist htmlcov .coverage test-project/build - -.PHONY: test -test: clean build - python3 scripts/test - $$(type -P python2) && python2 scripts/test || : - -.PHONY: big-test -big-test: test test-centos-7 test-centos-8 test-fedora test-ubuntu - -.PHONY: test-centos-8 -test-centos-8: - ${DOCKER_COMMAND} build -f scripts/test-centos-8.dockerfile -t plano-test-centos-8 . - ${DOCKER_COMMAND} run --rm plano-test-centos-8 - -.PHONY: test-centos-7 -test-centos-7: - ${DOCKER_COMMAND} build -f scripts/test-centos-7.dockerfile -t plano-test-centos-7 . - ${DOCKER_COMMAND} run --rm plano-test-centos-7 - -.PHONY: test-centos-6 -test-centos-6: - ${DOCKER_COMMAND} build -f scripts/test-centos-6.dockerfile -t plano-test-centos-6 . - ${DOCKER_COMMAND} run --rm plano-test-centos-6 - -.PHONY: test-fedora -test-fedora: - ${DOCKER_COMMAND} build -f scripts/test-fedora.dockerfile -t plano-test-fedora . - ${DOCKER_COMMAND} run --rm plano-test-fedora - -.PHONY: test-ubuntu -test-ubuntu: - ${DOCKER_COMMAND} build -f scripts/test-ubuntu.dockerfile -t plano-test-ubuntu . - ${DOCKER_COMMAND} run --rm plano-test-ubuntu - -.PHONY: test-bootstrap -test-bootstrap: - ${DOCKER_COMMAND} build -f scripts/test-bootstrap.dockerfile -t plano-test-bootstrap . - ${DOCKER_COMMAND} run --rm plano-test-bootstrap - -.PHONY: debug-bootstrap -debug-bootstrap: - ${DOCKER_COMMAND} build -f scripts/test-bootstrap.dockerfile -t plano-test-bootstrap . 
- ${DOCKER_COMMAND} run --rm -it plano-test-bootstrap /bin/bash - -.PHONY: coverage -coverage: - coverage3 run --omit /tmp/\* scripts/test - coverage3 report - coverage3 html - @echo file:${CURDIR}/htmlcov/index.html diff --git a/subrepos/skewer/subrepos/plano/README.md b/subrepos/skewer/subrepos/plano/README.md deleted file mode 100644 index 7857aeb..0000000 --- a/subrepos/skewer/subrepos/plano/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Plano - -[![main](https://github.com/ssorj/plano/workflows/main/badge.svg)](https://github.com/ssorj/plano/actions?query=workflow%3Amain) - -Python functions for writing shell-style system scripts. - -## Dependencies - - - curl - - make - - python - - tar - - findutils diff --git a/subrepos/skewer/subrepos/plano/bin/planosh b/subrepos/skewer/subrepos/plano/bin/planosh deleted file mode 100755 index 469a7ce..0000000 --- a/subrepos/skewer/subrepos/plano/bin/planosh +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/python3 -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# - -import os -import sys - -if os.path.islink(__file__): - source_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) - sys.path.insert(0, os.path.join(source_dir, "python")) - -if os.path.isdir("python"): - sys.path.insert(0, "python") - -from plano import PlanoShellCommand - -if __name__ == "__main__": - PlanoShellCommand().main() diff --git a/subrepos/skewer/subrepos/plano/python/bullseye.py b/subrepos/skewer/subrepos/plano/python/bullseye.py deleted file mode 100644 index ba1d76d..0000000 --- a/subrepos/skewer/subrepos/plano/python/bullseye.py +++ /dev/null @@ -1,319 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# - -from __future__ import print_function - -import collections as _collections -import fnmatch as _fnmatch -import os as _os -import shutil as _shutil -import sys as _sys - -from plano import * - -class _Project: - def __init__(self): - self.name = None - self.source_dir = "python" - self.included_modules = ["*"] - self.excluded_modules = ["plano", "bullseye"] - self.data_dirs = [] - self.build_dir = "build" - self.test_modules = [] - -project = _Project() - -_default_prefix = join(get_home_dir(), ".local") - -def check_project(): - assert project.name - assert project.source_dir - assert project.build_dir - -class project_env(working_env): - def __init__(self): - check_project() - - home_var = "{0}_HOME".format(project.name.upper().replace("-", "_")) - - env = { - home_var: get_absolute_path(join(project.build_dir, project.name)), - "PATH": get_absolute_path(join(project.build_dir, "bin")) + ":" + ENV["PATH"], - "PYTHONPATH": get_absolute_path(join(project.build_dir, project.name, project.source_dir)), - } - - super(project_env, self).__init__(**env) - -def configure_file(input_file, output_file, substitutions, quiet=False): - notice("Configuring '{0}' for output '{1}'", input_file, output_file) - - content = read(input_file) - - for name, value in substitutions.items(): - content = content.replace("@{0}@".format(name), value) - - write(output_file, content) - - _shutil.copymode(input_file, output_file) - - return output_file - -_prefix_arg = CommandArgument("prefix", help="The base path for installed files", default=_default_prefix) -_clean_arg = CommandArgument("clean_", help="Clean before starting", display_name="clean") -_verbose_arg = CommandArgument("verbose", help="Print detailed logging to the console") - -@command(args=(_prefix_arg, _clean_arg)) -def build(app, prefix=None, clean_=False): - check_project() - - if clean_: - clean(app) - - build_file = join(project.build_dir, "build.json") - build_data = {} - - if exists(build_file): - build_data = 
read_json(build_file) - - mtime = _os.stat(project.source_dir).st_mtime - - for path in find(project.source_dir): - mtime = max(mtime, _os.stat(path).st_mtime) - - if prefix is None: - prefix = build_data.get("prefix", _default_prefix) - - new_build_data = {"prefix": prefix, "mtime": mtime} - - debug("Existing build data: {0}", pformat(build_data)) - debug("New build data: {0}", pformat(new_build_data)) - - if build_data == new_build_data: - debug("Already built") - return - - write_json(build_file, new_build_data) - - default_home = join(prefix, "lib", project.name) - - for path in find("bin", "*.in"): - configure_file(path, join(project.build_dir, path[:-3]), {"default_home": default_home}) - - for path in find("bin", exclude="*.in"): - copy(path, join(project.build_dir, path), inside=False, symlinks=False) - - for path in find(project.source_dir, "*.py"): - module_name = get_name_stem(path) - included = any([_fnmatch.fnmatchcase(module_name, x) for x in project.included_modules]) - excluded = any([_fnmatch.fnmatchcase(module_name, x) for x in project.excluded_modules]) - - if included and not excluded: - copy(path, join(project.build_dir, project.name, path), inside=False, symlinks=False) - - for dir_name in project.data_dirs: - for path in find(dir_name): - copy(path, join(project.build_dir, project.name, path), inside=False, symlinks=False) - -@command(args=(CommandArgument("include", help="Run only tests with names matching PATTERN", metavar="PATTERN"), - CommandArgument("exclude", help="Do not run tests with names matching PATTERN", metavar="PATTERN"), - CommandArgument("enable", help="Enable disabled tests matching PATTERN", metavar="PATTERN"), - CommandArgument("list_", help="Print the test names and exit", display_name="list"), - _verbose_arg, _clean_arg)) -def test(app, include="*", exclude=None, enable=None, list_=False, verbose=False, clean_=False): - check_project() - - if clean_: - clean(app) - - if not list_: - build(app) - - with project_env(): - 
from plano import _import_module - modules = [_import_module(x) for x in project.test_modules] - - if not modules: # pragma: nocover - notice("No tests found") - return - - args = list() - - if list_: - print_tests(modules) - return - - exclude = nvl(exclude, ()) - enable = nvl(enable, ()) - - run_tests(modules, include=include, exclude=exclude, enable=enable, verbose=verbose) - -@command(args=(CommandArgument("staging_dir", help="A path prepended to installed files"), - _prefix_arg, _clean_arg)) -def install(app, staging_dir="", prefix=None, clean_=False): - check_project() - - build(app, prefix=prefix, clean_=clean_) - - assert is_dir(project.build_dir), list_dir() - - build_file = join(project.build_dir, "build.json") - build_data = read_json(build_file) - build_prefix = project.build_dir + "/" - install_prefix = staging_dir + build_data["prefix"] - - for path in find(join(project.build_dir, "bin")): - copy(path, join(install_prefix, remove_prefix(path, build_prefix)), inside=False, symlinks=False) - - for path in find(join(project.build_dir, project.name)): - copy(path, join(install_prefix, "lib", remove_prefix(path, build_prefix)), inside=False, symlinks=False) - -@command -def clean(app): - check_project() - - remove(project.build_dir) - remove(find(".", "__pycache__")) - remove(find(".", "*.pyc")) - -@command(args=(CommandArgument("undo", help="Generate settings that restore the previous environment"),)) -def env(app, undo=False): - """ - Generate shell settings for the project environment - - To apply the settings, source the output from your shell: - - $ source <(plano env) - """ - - check_project() - - project_dir = get_current_dir() # XXX Needs some checking - home_var = "{0}_HOME".format(project.name.upper().replace("-", "_")) - old_home_var = "OLD_{0}".format(home_var) - home_dir = join(project_dir, project.build_dir, project.name) - - if undo: - print("[[ ${0} ]] && export {1}=${2} && unset {3}".format(old_home_var, home_var, old_home_var, 
old_home_var)) - print("[[ $OLD_PATH ]] && export PATH=$OLD_PATH && unset OLD_PATH") - print("[[ $OLD_PYTHONPATH ]] && export PYTHONPATH=$OLD_PYTHONPATH && unset OLD_PYTHONPATH") - - return - - print("[[ ${0} ]] && export {1}=${2}".format(home_var, old_home_var, home_var)) - print("[[ $PATH ]] && export OLD_PATH=$PATH") - print("[[ $PYTHONPATH ]] && export OLD_PYTHONPATH=$PYTHONPATH") - - print("export {0}={1}".format(home_var, home_dir)) - - path = [ - join(project_dir, project.build_dir, "bin"), - ENV.get("PATH", ""), - ] - - print("export PATH={0}".format(join_path_var(*path))) - - python_path = [ - join(home_dir, project.source_dir), - join(project_dir, project.source_dir), - ENV.get("PYTHONPATH", ""), - ] - - print("export PYTHONPATH={0}".format(join_path_var(*python_path))) - -@command(args=(CommandArgument("filename", help="Which file to generate"), - CommandArgument("stdout", help="Print to stdout instead of writing the file directly"))) -def generate(app, filename, stdout=False): - """ - Generate standard project files - - Use one of the following filenames: - - .gitignore - LICENSE.txt - README.md - VERSION.txt - - Use the special filename "all" to generate all of them. 
- """ - - assert project.name - - project_files = _StringCatalog(__file__) - - if filename == "all": - for name in project_files: - _generate_file(project_files, name, stdout) - else: - _generate_file(project_files, filename, stdout) - -def _generate_file(project_files, filename, stdout): - try: - content = project_files[filename] - except KeyError: - exit("File {0} is not one of the options".format(repr(filename))) - - content = content.lstrip() - content = content.format(project_title=project.name.capitalize(), project_name=project.name) - - if stdout: - print(content, end="") - else: - write(filename, content) - -# @command -# def coverage(app): -# check_program("coverage3") - -# with project_env(): -# run("coverage3 run --include python/qtools/\* build/scripts-3.9/qtools-self-test") -# run("coverage3 report") -# run("coverage3 html") - -# print(f"file:{get_current_dir()}/htmlcov/index.html") - -class _StringCatalog(dict): - def __init__(self, path): - super(_StringCatalog, self).__init__() - - self.path = "{0}.strings".format(split_extension(path)[0]) - - check_file(self.path) - - key = None - out = list() - - for line in read_lines(self.path): - line = line.rstrip() - - if line.startswith("[") and line.endswith("]"): - if key: - self[key] = "".join(out).strip() + "\n" - - out = list() - key = line[1:-1] - - continue - - out.append(line) - out.append("\r\n") - - self[key] = "".join(out).strip() + "\n" - - def __repr__(self): - return format_repr(self) diff --git a/subrepos/skewer/subrepos/plano/python/bullseye.strings b/subrepos/skewer/subrepos/plano/python/bullseye.strings deleted file mode 100644 index 2ad9f78..0000000 --- a/subrepos/skewer/subrepos/plano/python/bullseye.strings +++ /dev/null @@ -1,221 +0,0 @@ -[.gitignore] -*.pyc -__pycache__/ -/build - -[README.md] -# {project_title} - -[![main](https://github.com/ssorj/{project_name}/workflows/main/badge.svg)](https://github.com/ssorj/{project_name}/actions?query=workflow%3Amain) - -## Project commands - 
-You can use the `./plano` command in the root of the project to -perform project tasks. It accepts a subcommand. Use `./plano --help` -to list the available commands. - -[LICENSE.txt] -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative 
Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{{}}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {{yyyy}} {{name of copyright owner}} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -[VERSION.txt] -0.1.0-SNAPSHOT diff --git a/subrepos/skewer/subrepos/plano/python/bullseye_tests.py b/subrepos/skewer/subrepos/plano/python/bullseye_tests.py deleted file mode 100644 index 4c8a0e1..0000000 --- a/subrepos/skewer/subrepos/plano/python/bullseye_tests.py +++ /dev/null @@ -1,145 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -from bullseye import * -from plano import * - -from bullseye import test as test_command - -test_project_dir = join(get_parent_dir(get_parent_dir(__file__)), "test-project") -result_file = "build/result.json" - -class test_project(working_dir): - def __enter__(self): - dir = super(test_project, self).__enter__() - copy(test_project_dir, ".", inside=False) - return dir - -def run_plano(*args): - PlanoCommand().main(["-f", join(test_project_dir, "Planofile")] + list(args)) - -@test -def project_operations(): - project.name = "alphabet" - - with project_env(): - assert "ALPHABET_HOME" in ENV, ENV - - with working_dir(): - input_file = write("zeta-file", "X@replace-me@X") - output_file = configure_file(input_file, "zeta-file", {"replace-me": "Y"}) - output = read(output_file) - assert output == "XYX", output - -@test -def build_command(): - with test_project(): - run_plano("build") - - result = read_json(result_file) - assert result["built"], result - - check_file("build/bin/chucker") - check_file("build/bin/chucker-test") - check_file("build/chucker/python/chucker.py") - check_file("build/chucker/python/chucker_tests.py") - - result = read("build/bin/chucker").strip() - assert result.endswith(".local/lib/chucker"), result - - result = read_json("build/build.json") - assert result["prefix"].endswith(".local"), result - - run_plano("build", "--clean", "--prefix", "/usr/local") - - result = read("build/bin/chucker").strip() - assert result == "/usr/local/lib/chucker", result - - result = read_json("build/build.json") - assert result["prefix"] == 
("/usr/local"), result - -@test -def test_command(): - with test_project(): - run_plano("test") - - check_file(result_file) - - result = read_json(result_file) - assert result["tested"], result - - run_plano("test", "--verbose") - run_plano("test", "--list") - run_plano("test", "--include", "test_hello") - run_plano("test", "--clean") - -@test -def install_command(): - with test_project(): - run_plano("install", "--staging-dir", "staging") - - result = read_json(result_file) - assert result["installed"], result - - check_dir("staging") - - with test_project(): - assert not exists("build"), list_dir() - - run_plano("build", "--prefix", "/opt/local") - run_plano("install", "--staging-dir", "staging") - - check_dir("staging/opt/local") - -@test -def clean_command(): - with test_project(): - run_plano("build") - - check_dir("build") - - run_plano("clean") - - assert not is_dir("build") - -@test -def env_command(): - with test_project(): - run_plano("env") - run_plano("env", "--undo") - -@test -def generate_command(): - with test_project(): - run_plano("generate", "README.md") - - assert exists("README.md"), list_dir() - - run_plano("generate", "--stdout", "LICENSE.txt") - - assert not exists("LICENSE.txt"), list_dir() - - run_plano("generate", "all") - - assert exists(".gitignore"), list_dir() - assert exists("LICENSE.txt"), list_dir() - assert exists("VERSION.txt"), list_dir() - - with expect_system_exit(): - run_plano("generate", "no-such-file") diff --git a/subrepos/skewer/subrepos/plano/python/plano.py b/subrepos/skewer/subrepos/plano/python/plano.py deleted file mode 100644 index 5e23069..0000000 --- a/subrepos/skewer/subrepos/plano/python/plano.py +++ /dev/null @@ -1,2366 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -from __future__ import print_function - -import argparse as _argparse -import base64 as _base64 -import binascii as _binascii -import code as _code -import codecs as _codecs -import collections as _collections -import fnmatch as _fnmatch -import getpass as _getpass -import inspect as _inspect -import json as _json -import os as _os -import pprint as _pprint -import pkgutil as _pkgutil -import random as _random -import re as _re -import shlex as _shlex -import shutil as _shutil -import signal as _signal -import socket as _socket -import subprocess as _subprocess -import sys as _sys -import tempfile as _tempfile -import time as _time -import traceback as _traceback -import uuid as _uuid - -try: # pragma: nocover - import urllib.parse as _urlparse -except ImportError: # pragma: nocover - import urllib as _urlparse - -try: - import importlib as _importlib - - def _import_module(name): - return _importlib.import_module(name) -except ImportError: # pragma: nocover - def _import_module(name): - return __import__(name, fromlist=[""]) - -_max = max - -## Exceptions - -class PlanoException(Exception): - pass - -class PlanoError(PlanoException): - pass - -class PlanoTimeout(PlanoException): - pass - -class PlanoTestSkipped(Exception): - pass - -## Global variables - -ENV = _os.environ -ARGS = _sys.argv - -STDIN = _sys.stdin -STDOUT = _sys.stdout -STDERR = _sys.stderr -DEVNULL = _os.devnull - -PYTHON2 = 
_sys.version_info[0] == 2 -PYTHON3 = _sys.version_info[0] == 3 - -PLANO_DEBUG = "PLANO_DEBUG" in ENV - -## Archive operations - -def make_archive(input_dir, output_file=None, quiet=False): - """ - group: archive_operations - """ - - check_program("tar") - - archive_stem = get_base_name(input_dir) - - if output_file is None: - output_file = "{0}.tar.gz".format(join(get_current_dir(), archive_stem)) - - _info(quiet, "Making archive {0} from directory {1}", repr(output_file), repr(input_dir)) - - with working_dir(get_parent_dir(input_dir)): - run("tar -czf {0} {1}".format(output_file, archive_stem)) - - return output_file - -def extract_archive(input_file, output_dir=None, quiet=False): - check_program("tar") - - if output_dir is None: - output_dir = get_current_dir() - - _info(quiet, "Extracting archive {0} to directory {1}", repr(input_file), repr(output_dir)) - - input_file = get_absolute_path(input_file) - - with working_dir(output_dir): - run("tar -xf {0}".format(input_file)) - - return output_dir - -def rename_archive(input_file, new_archive_stem, quiet=False): - _info(quiet, "Renaming archive {0} with stem {1}", repr(input_file), repr(new_archive_stem)) - - output_dir = get_absolute_path(get_parent_dir(input_file)) - output_file = "{0}.tar.gz".format(join(output_dir, new_archive_stem)) - - input_file = get_absolute_path(input_file) - - with working_dir(): - extract_archive(input_file) - - input_name = list_dir()[0] - input_dir = move(input_name, new_archive_stem) - - make_archive(input_dir, output_file=output_file) - - remove(input_file) - - return output_file - -## Command operations - -class BaseCommand(object): - def main(self, args=None): - args = self.parse_args(args) - - assert args is None or isinstance(args, _argparse.Namespace), args - - self.verbose = args.verbose or args.debug - self.quiet = args.quiet - self.debug_enabled = args.debug - self.init_only = args.init_only - - level = "notice" - - if self.verbose: - level = "info" - - if self.quiet: - 
level = "error" - - if self.debug_enabled: - level = "debug" - - with logging_enabled(level=level): - try: - self.init(args) - - if self.init_only: - return - - self.run() - except KeyboardInterrupt: - pass - except PlanoError as e: - if self.debug_enabled: - _traceback.print_exc() - exit(1) - else: - exit(str(e)) - - def parse_args(self, args): # pragma: nocover - raise NotImplementedError() - - def init(self, args): # pragma: nocover - pass - - def run(self): # pragma: nocover - raise NotImplementedError() - -class BaseArgumentParser(_argparse.ArgumentParser): - def __init__(self, **kwargs): - super(BaseArgumentParser, self).__init__(**kwargs) - - self.allow_abbrev = False - self.formatter_class = _argparse.RawDescriptionHelpFormatter - - self.add_argument("--verbose", action="store_true", - help="Print detailed logging to the console") - self.add_argument("--quiet", action="store_true", - help="Print no logging to the console") - self.add_argument("--debug", action="store_true", - help="Print debugging output to the console") - self.add_argument("--init-only", action="store_true", - help=_argparse.SUPPRESS) - - _capitalize_help(self) - -# Patch the default help text -def _capitalize_help(parser): - try: - for action in parser._actions: - if action.help and action.help is not _argparse.SUPPRESS: - action.help = capitalize(action.help) - except: # pragma: nocover - pass - -## Console operations - -def flush(): - _sys.stdout.flush() - _sys.stderr.flush() - -def eprint(*args, **kwargs): - print(*args, file=_sys.stderr, **kwargs) - -def pprint(*args, **kwargs): - args = [pformat(x) for x in args] - print(*args, **kwargs) - -_color_codes = { - "black": "\u001b[30", - "red": "\u001b[31", - "green": "\u001b[32", - "yellow": "\u001b[33", - "blue": "\u001b[34", - "magenta": "\u001b[35", - "cyan": "\u001b[36", - "white": "\u001b[37", -} - -_color_reset = "\u001b[0m" - -def _get_color_code(color, bright): - elems = [_color_codes[color]] - - if bright: - elems.append(";1") - 
- elems.append("m") - - return "".join(elems) - -def _is_color_enabled(file): - return PYTHON3 and hasattr(file, "isatty") and file.isatty() - -class console_color(object): - def __init__(self, color=None, bright=False, file=_sys.stdout): - self.file = file - self.color_code = None - - if (color, bright) != (None, False): - self.color_code = _get_color_code(color, bright) - - self.enabled = self.color_code is not None and _is_color_enabled(self.file) - - def __enter__(self): - if self.enabled: - print(self.color_code, file=self.file, end="", flush=True) - - def __exit__(self, exc_type, exc_value, traceback): - if self.enabled: - print(_color_reset, file=self.file, end="", flush=True) - -def cformat(value, color=None, bright=False, file=_sys.stdout): - if (color, bright) != (None, False) and _is_color_enabled(file): - return "".join((_get_color_code(color, bright), value, _color_reset)) - else: - return value - -def cprint(*args, **kwargs): - color = kwargs.pop("color", "white") - bright = kwargs.pop("bright", False) - file = kwargs.get("file", _sys.stdout) - - with console_color(color, bright=bright, file=file): - print(*args, **kwargs) - -class output_redirected(object): - def __init__(self, output, quiet=False): - self.output = output - self.quiet = quiet - - def __enter__(self): - flush() - - _info(self.quiet, "Redirecting output to file {0}", repr(self.output)) - - if is_string(self.output): - output = open(self.output, "w") - - self.prev_stdout, self.prev_stderr = _sys.stdout, _sys.stderr - _sys.stdout, _sys.stderr = output, output - - def __exit__(self, exc_type, exc_value, traceback): - flush() - - _sys.stdout, _sys.stderr = self.prev_stdout, self.prev_stderr - -try: - breakpoint -except NameError: # pragma: nocover - def breakpoint(): - import pdb - pdb.set_trace() - -def repl(vars): # pragma: nocover - _code.InteractiveConsole(locals=vars).interact() - -def print_properties(props, file=None): - size = max([len(x[0]) for x in props]) - - for prop in props: 
- name = "{0}:".format(prop[0]) - template = "{{0:<{0}}} ".format(size + 1) - - print(template.format(name), prop[1], end="", file=file) - - for value in prop[2:]: - print(" {0}".format(value), end="", file=file) - - print(file=file) - -## Directory operations - -def find(dirs=None, include="*", exclude=()): - if dirs is None: - dirs = "." - - if is_string(dirs): - dirs = (dirs,) - - if is_string(include): - include = (include,) - - if is_string(exclude): - exclude = (exclude,) - - found = set() - - for dir in dirs: - for root, dir_names, file_names in _os.walk(dir): - names = dir_names + file_names - - for include_pattern in include: - names = _fnmatch.filter(names, include_pattern) - - for exclude_pattern in exclude: - for name in _fnmatch.filter(names, exclude_pattern): - names.remove(name) - - if root.startswith("./"): - root = remove_prefix(root, "./") - elif root == ".": - root = "" - - found.update([join(root, x) for x in names]) - - return sorted(found) - -def make_dir(dir, quiet=False): - if dir == "": - return dir - - if not exists(dir): - _info(quiet, "Making directory '{0}'", dir) - _os.makedirs(dir) - - return dir - -def make_parent_dir(path, quiet=False): - return make_dir(get_parent_dir(path), quiet=quiet) - -# Returns the current working directory so you can change it back -def change_dir(dir, quiet=False): - _debug(quiet, "Changing directory to {0}", repr(dir)) - - prev_dir = get_current_dir() - - if not dir: - return prev_dir - - _os.chdir(dir) - - return prev_dir - -def list_dir(dir=None, include="*", exclude=()): - if dir in (None, ""): - dir = get_current_dir() - - assert is_dir(dir) - - if is_string(include): - include = (include,) - - if is_string(exclude): - exclude = (exclude,) - - names = _os.listdir(dir) - - for include_pattern in include: - names = _fnmatch.filter(names, include_pattern) - - for exclude_pattern in exclude: - for name in _fnmatch.filter(names, exclude_pattern): - names.remove(name) - - return sorted(names) - -# No args 
constructor gets a temp dir -class working_dir(object): - def __init__(self, dir=None, quiet=False): - self.dir = dir - self.prev_dir = None - self.remove = False - self.quiet = quiet - - if self.dir is None: - self.dir = make_temp_dir() - self.remove = True - - def __enter__(self): - if self.dir == ".": - return - - _info(self.quiet, "Entering directory {0}", repr(get_absolute_path(self.dir))) - - make_dir(self.dir, quiet=True) - - self.prev_dir = change_dir(self.dir, quiet=True) - - return self.dir - - def __exit__(self, exc_type, exc_value, traceback): - if self.dir == ".": - return - - _debug(self.quiet, "Returning to directory {0}", repr(get_absolute_path(self.prev_dir))) - - change_dir(self.prev_dir, quiet=True) - - if self.remove: - remove(self.dir, quiet=True) - -## Environment operations - -def join_path_var(*paths): - return _os.pathsep.join(unique(skip(paths))) - -def get_current_dir(): - return _os.getcwd() - -def get_home_dir(user=None): - return _os.path.expanduser("~{0}".format(user or "")) - -def get_user(): - return _getpass.getuser() - -def get_hostname(): - return _socket.gethostname() - -def get_program_name(command=None): - if command is None: - args = ARGS - else: - args = command.split() - - for arg in args: - if "=" not in arg: - return get_base_name(arg) - -def which(program_name): - assert "PATH" in _os.environ, _os.environ - - for dir in _os.environ["PATH"].split(_os.pathsep): - program = join(dir, program_name) - - if _os.access(program, _os.X_OK): - return program - -def check_env(var, message=None): - if var not in _os.environ: - if message is None: - message = "Environment variable {0} is not set".format(repr(var)) - - raise PlanoError(message) - -def check_module(module, message=None): - if _pkgutil.find_loader(module) is None: - if message is None: - message = "Module {0} is not found".format(repr(module)) - - raise PlanoError(message) - -def check_program(program, message=None): - if which(program) is None: - if message is None: - 
message = "Program {0} is not found".format(repr(program)) - - raise PlanoError(message) - -class working_env(object): - def __init__(self, **vars): - self.amend = vars.pop("amend", True) - self.vars = vars - - def __enter__(self): - self.prev_vars = dict(_os.environ) - - if not self.amend: - for name, value in list(_os.environ.items()): - if name not in self.vars: - del _os.environ[name] - - for name, value in self.vars.items(): - _os.environ[name] = str(value) - - def __exit__(self, exc_type, exc_value, traceback): - for name, value in self.prev_vars.items(): - _os.environ[name] = value - - for name, value in self.vars.items(): - if name not in self.prev_vars: - del _os.environ[name] - -class working_module_path(object): - def __init__(self, path, amend=True): - if is_string(path): - if not is_absolute(path): - path = get_absolute_path(path) - - path = [path] - - if amend: - path = path + _sys.path - - self.path = path - - def __enter__(self): - self.prev_path = _sys.path - _sys.path = self.path - - def __exit__(self, exc_type, exc_value, traceback): - _sys.path = self.prev_path - -def print_env(file=None): - props = ( - ("ARGS", ARGS), - ("ENV['PATH']", ENV.get("PATH")), - ("ENV['PYTHONPATH']", ENV.get("PYTHONPATH")), - ("sys.executable", _sys.executable), - ("sys.path", _sys.path), - ("sys.version", _sys.version.replace("\n", "")), - ("get_current_dir()", get_current_dir()), - ("get_home_dir()", get_home_dir()), - ("get_hostname()", get_hostname()), - ("get_program_name()", get_program_name()), - ("get_user()", get_user()), - ("plano.__file__", __file__), - ("which('plano')", which("plano")), - ) - - print_properties(props, file=file) - -## File operations - -def touch(file, quiet=False): - _info(quiet, "Touching {0}", repr(file)) - - try: - _os.utime(file, None) - except OSError: - append(file, "") - - return file - -# symlinks=True - Preserve symlinks -# inside=True - Place from_path inside to_path if to_path is a directory -def copy(from_path, to_path, 
symlinks=True, inside=True, quiet=False): - _info(quiet, "Copying {0} to {1}", repr(from_path), repr(to_path)) - - if is_dir(to_path) and inside: - to_path = join(to_path, get_base_name(from_path)) - else: - make_parent_dir(to_path, quiet=True) - - if is_dir(from_path): - for name in list_dir(from_path): - copy(join(from_path, name), join(to_path, name), symlinks=symlinks, inside=False, quiet=True) - - _shutil.copystat(from_path, to_path) - elif is_link(from_path) and symlinks: - make_link(to_path, read_link(from_path), quiet=True) - else: - _shutil.copy2(from_path, to_path) - - return to_path - -# inside=True - Place from_path inside to_path if to_path is a directory -def move(from_path, to_path, inside=True, quiet=False): - _info(quiet, "Moving {0} to {1}", repr(from_path), repr(to_path)) - - to_path = copy(from_path, to_path, inside=inside, quiet=True) - remove(from_path, quiet=True) - - return to_path - -def remove(paths, quiet=False): - if is_string(paths): - paths = (paths,) - - for path in paths: - if not exists(path): - continue - - _debug(quiet, "Removing {0}", repr(path)) - - if is_dir(path): - _shutil.rmtree(path, ignore_errors=True) - else: - _os.remove(path) - -def get_file_size(file): - return _os.path.getsize(file) - -## IO operations - -def read(file): - with _codecs.open(file, encoding="utf-8", mode="r") as f: - return f.read() - -def write(file, string): - make_parent_dir(file, quiet=True) - - with _codecs.open(file, encoding="utf-8", mode="w") as f: - f.write(string) - - return file - -def append(file, string): - make_parent_dir(file, quiet=True) - - with _codecs.open(file, encoding="utf-8", mode="a") as f: - f.write(string) - - return file - -def prepend(file, string): - orig = read(file) - return write(file, string + orig) - -def tail(file, count): - return "".join(tail_lines(file, count)) - -def read_lines(file): - with _codecs.open(file, encoding="utf-8", mode="r") as f: - return f.readlines() - -def write_lines(file, lines): - 
make_parent_dir(file, quiet=True) - - with _codecs.open(file, encoding="utf-8", mode="w") as f: - f.writelines(lines) - - return file - -def append_lines(file, lines): - make_parent_dir(file, quiet=True) - - with _codecs.open(file, encoding="utf-8", mode="a") as f: - f.writelines(lines) - - return file - -def prepend_lines(file, lines): - orig_lines = read_lines(file) - - make_parent_dir(file, quiet=True) - - with _codecs.open(file, encoding="utf-8", mode="w") as f: - f.writelines(lines) - f.writelines(orig_lines) - - return file - -def tail_lines(file, count): - assert count >= 0 - - with _codecs.open(file, encoding="utf-8", mode="r") as f: - pos = count + 1 - lines = list() - - while len(lines) <= count: - try: - f.seek(-pos, 2) - except IOError: - f.seek(0) - break - finally: - lines = f.readlines() - - pos *= 2 - - return lines[-count:] - -def replace_in_file(file, expr, replacement, count=0): - write(file, replace(read(file), expr, replacement, count=count)) - -def concatenate(file, input_files): - assert file not in input_files - - make_parent_dir(file, quiet=True) - - with open(file, "wb") as f: - for input_file in input_files: - if not exists(input_file): - continue - - with open(input_file, "rb") as inf: - _shutil.copyfileobj(inf, f) - -## Iterable operations - -def unique(iterable): - return list(_collections.OrderedDict.fromkeys(iterable).keys()) - -def skip(iterable, values=(None, "", (), [], {})): - if is_scalar(values): - values = (values,) - - items = list() - - for item in iterable: - if item not in values: - items.append(item) - - return items - -## JSON operations - -def read_json(file): - with _codecs.open(file, encoding="utf-8", mode="r") as f: - return _json.load(f) - -def write_json(file, data): - make_parent_dir(file, quiet=True) - - with _codecs.open(file, encoding="utf-8", mode="w") as f: - _json.dump(data, f, indent=4, separators=(",", ": "), sort_keys=True) - - return file - -def parse_json(json): - return _json.loads(json) - -def 
emit_json(data): - return _json.dumps(data, indent=4, separators=(",", ": "), sort_keys=True) - -## HTTP operations - -def _run_curl(method, url, content=None, content_file=None, content_type=None, output_file=None, insecure=False): - check_program("curl") - - options = [ - "-sf", - "-X", method, - "-H", "'Expect:'", - ] - - if content is not None: - assert content_file is None - options.extend(("-d", "@-")) - - if content_file is not None: - assert content is None, content - options.extend(("-d", "@{0}".format(content_file))) - - if content_type is not None: - options.extend(("-H", "'Content-Type: {0}'".format(content_type))) - - if output_file is not None: - options.extend(("-o", output_file)) - - if insecure: - options.append("--insecure") - - options = " ".join(options) - command = "curl {0} {1}".format(options, url) - - if output_file is None: - return call(command, input=content) - else: - make_parent_dir(output_file, quiet=True) - run(command, input=content) - -def http_get(url, output_file=None, insecure=False): - return _run_curl("GET", url, output_file=output_file, insecure=insecure) - -def http_get_json(url, insecure=False): - return parse_json(http_get(url, insecure=insecure)) - -def http_put(url, content, content_type=None, insecure=False): - _run_curl("PUT", url, content=content, content_type=content_type, insecure=insecure) - -def http_put_file(url, content_file, content_type=None, insecure=False): - _run_curl("PUT", url, content_file=content_file, content_type=content_type, insecure=insecure) - -def http_put_json(url, data, insecure=False): - http_put(url, emit_json(data), content_type="application/json", insecure=insecure) - -def http_post(url, content, content_type=None, output_file=None, insecure=False): - return _run_curl("POST", url, content=content, content_type=content_type, output_file=output_file, insecure=insecure) - -def http_post_file(url, content_file, content_type=None, output_file=None, insecure=False): - return _run_curl("POST", url, 
content_file=content_file, content_type=content_type, output_file=output_file, insecure=insecure) - -def http_post_json(url, data, insecure=False): - return parse_json(http_post(url, emit_json(data), content_type="application/json", insecure=insecure)) - -## Link operations - -def make_link(path, linked_path, quiet=False): - _info(quiet, "Making link {0} to {1}", repr(path), repr(linked_path)) - - make_parent_dir(path, quiet=True) - remove(path, quiet=True) - - _os.symlink(linked_path, path) - - return path - -def read_link(path): - return _os.readlink(path) - -## Logging operations - -_logging_levels = ( - "debug", - "info", - "notice", - "warn", - "error", - "disabled", -) - -_DEBUG = _logging_levels.index("debug") -_INFO = _logging_levels.index("info") -_NOTICE = _logging_levels.index("notice") -_WARN = _logging_levels.index("warn") -_ERROR = _logging_levels.index("error") -_DISABLED = _logging_levels.index("disabled") - -_logging_output = None -_logging_threshold = _NOTICE - -def enable_logging(level="notice", output=None): - assert level in _logging_levels - - info("Enabling logging (level={0}, output={1})", repr(level), repr(nvl(output, "stderr"))) - - global _logging_threshold - _logging_threshold = _logging_levels.index(level) - - if is_string(output): - output = open(output, "w") - - global _logging_output - _logging_output = output - -def disable_logging(): - info("Disabling logging") - - global _logging_threshold - _logging_threshold = _DISABLED - -class logging_enabled(object): - def __init__(self, level="notice", output=None): - self.level = level - self.output = output - - def __enter__(self): - self.prev_level = _logging_levels[_logging_threshold] - self.prev_output = _logging_output - - if self.level == "disabled": - disable_logging() - else: - enable_logging(level=self.level, output=self.output) - - def __exit__(self, exc_type, exc_value, traceback): - if self.prev_level == "disabled": - disable_logging() - else: - 
enable_logging(level=self.prev_level, output=self.prev_output) - -class logging_disabled(logging_enabled): - def __init__(self): - super(logging_disabled, self).__init__(level="disabled") - -def fail(message, *args): - error(message, *args) - - if isinstance(message, BaseException): - raise message - - raise PlanoError(message.format(*args)) - -def error(message, *args): - log(_ERROR, message, *args) - -def warn(message, *args): - log(_WARN, message, *args) - -def notice(message, *args): - log(_NOTICE, message, *args) - -def info(message, *args): - log(_INFO, message, *args) - -def debug(message, *args): - log(_DEBUG, message, *args) - -def log(level, message, *args): - if is_string(level): - level = _logging_levels.index(level) - - if _logging_threshold <= level: - _print_message(level, message, args) - -def _print_message(level, message, args): - out = nvl(_logging_output, _sys.stderr) - exception = None - - if isinstance(message, BaseException): - exception = message - message = "{0}: {1}".format(type(message).__name__, str(message)) - else: - message = str(message) - - if args: - message = message.format(*args) - - program = "{0}:".format(get_program_name()) - - level_color = ("cyan", "cyan", "blue", "yellow", "red", None)[level] - level_bright = (False, False, False, False, True, False)[level] - level = cformat("{0:>6}:".format(_logging_levels[level]), color=level_color, bright=level_bright, file=out) - - print(program, level, capitalize(message), file=out) - - if exception is not None and hasattr(exception, "__traceback__"): - _traceback.print_exception(type(exception), exception, exception.__traceback__, file=out) - - out.flush() - -def _debug(quiet, message, *args): - if quiet: - debug(message, *args) - else: - notice(message, *args) - -def _info(quiet, message, *args): - if quiet: - info(message, *args) - else: - notice(message, *args) - -## Path operations - -def get_absolute_path(path): - return _os.path.abspath(path) - -def normalize_path(path): - 
return _os.path.normpath(path) - -def get_real_path(path): - return _os.path.realpath(path) - -def get_relative_path(path, start=None): - return _os.path.relpath(path, start=start) - -def get_file_url(path): - return "file:{0}".format(get_absolute_path(path)) - -def exists(path): - return _os.path.lexists(path) - -def is_absolute(path): - return _os.path.isabs(path) - -def is_dir(path): - return _os.path.isdir(path) - -def is_file(path): - return _os.path.isfile(path) - -def is_link(path): - return _os.path.islink(path) - -def join(*paths): - return _os.path.join(*paths) - -def split(path): - return _os.path.split(path) - -def split_extension(path): - return _os.path.splitext(path) - -def get_parent_dir(path): - path = normalize_path(path) - parent, child = split(path) - - return parent - -def get_base_name(path): - path = normalize_path(path) - parent, name = split(path) - - return name - -def get_name_stem(file): - name = get_base_name(file) - - if name.endswith(".tar.gz"): - name = name[:-3] - - stem, ext = split_extension(name) - - return stem - -def get_name_extension(file): - name = get_base_name(file) - stem, ext = split_extension(name) - - return ext - -def _check_path(path, test_func, message): - if not test_func(path): - found_paths = [repr(x) for x in list_dir(get_parent_dir(path))] - message = "{0}. 
The parent directory contains: {1}".format(message.format(repr(path)), ", ".join(found_paths)) - - raise PlanoError(message) - -def check_exists(path): - _check_path(path, exists, "File or directory {0} not found") - -def check_file(path): - _check_path(path, is_file, "File {0} not found") - -def check_dir(path): - _check_path(path, is_dir, "Directory {0} not found") - -def await_exists(path, timeout=30, quiet=False): - _info(quiet, "Waiting for path {0} to exist", repr(path)) - - timeout_message = "Timed out waiting for path {0} to exist".format(path) - period = 0.03125 - - with Timer(timeout=timeout, timeout_message=timeout_message) as timer: - while True: - try: - check_exists(path) - except PlanoError: - sleep(period, quiet=True) - period = min(1, period * 2) - else: - return - -## Port operations - -def get_random_port(min=49152, max=65535): - ports = [_random.randint(min, max) for _ in range(3)] - - for port in ports: - try: - check_port(port) - except PlanoError: - return port - - raise PlanoError("Random ports unavailable") - -def check_port(port, host="localhost"): - sock = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM) - sock.setsockopt(_socket.SOL_SOCKET, _socket.SO_REUSEADDR, 1) - - if sock.connect_ex((host, port)) != 0: - raise PlanoError("Port {0} (host {1}) is not reachable".format(repr(port), repr(host))) - -def await_port(port, host="localhost", timeout=30, quiet=False): - _info(quiet, "Waiting for port {0}", port) - - if is_string(port): - port = int(port) - - timeout_message = "Timed out waiting for port {0} to open".format(port) - period = 0.03125 - - with Timer(timeout=timeout, timeout_message=timeout_message) as timer: - while True: - try: - check_port(port, host=host) - except PlanoError: - sleep(period, quiet=True) - period = min(1, period * 2) - else: - return - -## Process operations - -def get_process_id(): - return _os.getpid() - -def _format_command(command, represent=True): - if not is_string(command): - command = " 
".join(command) - - if represent: - return repr(command) - else: - return command - -# quiet=False - Don't log at notice level -# stash=False - No output unless there is an error -# output= - Send stdout and stderr to a file -# stdin= - XXX -# stdout= - Send stdout to a file -# stderr= - Send stderr to a file -# shell=False - XXX -def start(command, stdin=None, stdout=None, stderr=None, output=None, shell=False, stash=False, quiet=False): - _info(quiet, "Starting command {0}", _format_command(command)) - - if output is not None: - stdout, stderr = output, output - - if is_string(stdin): - stdin = open(stdin, "r") - - if is_string(stdout): - stdout = open(stdout, "w") - - if is_string(stderr): - stderr = open(stderr, "w") - - if stdin is None: - stdin = _sys.stdin - - if stdout is None: - stdout = _sys.stdout - - if stderr is None: - stderr = _sys.stderr - - stash_file = None - - if stash: - stash_file = make_temp_file() - out = open(stash_file, "w") - stdout = out - stderr = out - - if shell: - if is_string(command): - args = command - else: - args = " ".join(command) - else: - if is_string(command): - args = _shlex.split(command) - else: - args = command - - try: - proc = PlanoProcess(args, stdin=stdin, stdout=stdout, stderr=stderr, shell=shell, close_fds=True, stash_file=stash_file) - except OSError as e: - raise PlanoError("Command {0}: {1}".format(_format_command(command), str(e))) - - debug("{0} started", proc) - - return proc - -def stop(proc, timeout=None, quiet=False): - _info(quiet, "Stopping {0}", proc) - - if proc.poll() is not None: - if proc.exit_code == 0: - debug("{0} already exited normally", proc) - elif proc.exit_code == -(_signal.SIGTERM): - debug("{0} was already terminated", proc) - else: - debug("{0} already exited with code {1}", proc, proc.exit_code) - - return proc - - kill(proc, quiet=True) - - return wait(proc, timeout=timeout, quiet=True) - -def kill(proc, quiet=False): - _info(quiet, "Killing {0}", proc) - - proc.terminate() - -def 
wait(proc, timeout=None, check=False, quiet=False): - _info(quiet, "Waiting for {0} to exit", proc) - - if PYTHON2: # pragma: nocover - assert timeout is None, "The timeout option is not supported on Python 2" - proc.wait() - else: - try: - proc.wait(timeout=timeout) - except _subprocess.TimeoutExpired: - raise PlanoTimeout() - - if proc.exit_code == 0: - debug("{0} exited normally", proc) - elif proc.exit_code < 0: - debug("{0} was terminated by signal {1}", proc, abs(proc.exit_code)) - else: - debug("{0} exited with code {1}", proc, proc.exit_code) - - if proc.stash_file is not None: - if proc.exit_code > 0: - eprint(read(proc.stash_file), end="") - - remove(proc.stash_file, quiet=True) - - if check and proc.exit_code > 0: - raise PlanoProcessError(proc) - - return proc - -# input= - Pipe to the process -def run(command, stdin=None, stdout=None, stderr=None, input=None, output=None, - stash=False, shell=False, check=True, quiet=False): - _info(quiet, "Running command {0}", _format_command(command)) - - if input is not None: - assert stdin in (None, _subprocess.PIPE), stdin - - input = input.encode("utf-8") - stdin = _subprocess.PIPE - - proc = start(command, stdin=stdin, stdout=stdout, stderr=stderr, output=output, - stash=stash, shell=shell, quiet=True) - - proc.stdout_result, proc.stderr_result = proc.communicate(input=input) - - if proc.stdout_result is not None: - proc.stdout_result = proc.stdout_result.decode("utf-8") - - if proc.stderr_result is not None: - proc.stderr_result = proc.stderr_result.decode("utf-8") - - return wait(proc, check=check, quiet=True) - -# input= - Pipe the given input into the process -def call(command, input=None, shell=False, quiet=False): - _info(quiet, "Calling {0}", _format_command(command)) - - proc = run(command, stdin=_subprocess.PIPE, stdout=_subprocess.PIPE, stderr=_subprocess.PIPE, - input=input, shell=shell, check=True, quiet=True) - - return proc.stdout_result - -def exit(arg=None, *args, **kwargs): - verbose = 
kwargs.get("verbose", False) - - if arg in (0, None): - if verbose: - notice("Exiting normally") - - _sys.exit() - - if is_string(arg): - if args: - arg = arg.format(*args) - - if verbose: - error(arg) - - _sys.exit(arg) - - if isinstance(arg, BaseException): - if verbose: - error(arg) - - _sys.exit(str(arg)) - - if isinstance(arg, int): - _sys.exit(arg) - - raise PlanoException("Illegal argument") - -_child_processes = list() - -class PlanoProcess(_subprocess.Popen): - def __init__(self, args, **options): - self.stash_file = options.pop("stash_file", None) - - super(PlanoProcess, self).__init__(args, **options) - - self.args = args - self.stdout_result = None - self.stderr_result = None - - _child_processes.append(self) - - @property - def exit_code(self): - return self.returncode - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, traceback): - kill(self) - - def __repr__(self): - return "process {0} (command {1})".format(self.pid, _format_command(self.args)) - -class PlanoProcessError(_subprocess.CalledProcessError, PlanoError): - def __init__(self, proc): - super(PlanoProcessError, self).__init__(proc.exit_code, _format_command(proc.args, represent=False)) - -def _default_sigterm_handler(signum, frame): - for proc in _child_processes: - if proc.poll() is None: - proc.terminate() - - exit(-(_signal.SIGTERM)) - -_signal.signal(_signal.SIGTERM, _default_sigterm_handler) - -## String operations - -def replace(string, expr, replacement, count=0): - return _re.sub(expr, replacement, string, count) - -def remove_prefix(string, prefix): - if string is None: - return "" - - if prefix and string.startswith(prefix): - string = string[len(prefix):] - - return string - -def remove_suffix(string, suffix): - if string is None: - return "" - - if suffix and string.endswith(suffix): - string = string[:-len(suffix)] - - return string - -def shorten(string, max, ellipsis=None): - assert max is None or isinstance(max, int) - - if string is None: - 
return "" - - if max is None or len(string) < max: - return string - else: - if ellipsis is not None: - string = string + ellipsis - end = _max(0, max - len(ellipsis)) - return string[0:end] + ellipsis - else: - return string[0:max] - -def plural(noun, count=0, plural=None): - if noun in (None, ""): - return "" - - if count == 1: - return noun - - if plural is None: - if noun.endswith("s"): - plural = "{0}ses".format(noun) - else: - plural = "{0}s".format(noun) - - return plural - -def capitalize(string): - if not string: - return "" - - return string[0].upper() + string[1:] - -def base64_encode(string): - return _base64.b64encode(string) - -def base64_decode(string): - return _base64.b64decode(string) - -def url_encode(string): - return _urlparse.quote_plus(string) - -def url_decode(string): - return _urlparse.unquote_plus(string) - -## Temp operations - -def get_system_temp_dir(): - return _tempfile.gettempdir() - -def get_user_temp_dir(): - try: - return _os.environ["XDG_RUNTIME_DIR"] - except KeyError: - return join(get_system_temp_dir(), get_user()) - -def make_temp_file(suffix="", dir=None): - if dir is None: - dir = get_system_temp_dir() - - return _tempfile.mkstemp(prefix="plano-", suffix=suffix, dir=dir)[1] - -def make_temp_dir(suffix="", dir=None): - if dir is None: - dir = get_system_temp_dir() - - return _tempfile.mkdtemp(prefix="plano-", suffix=suffix, dir=dir) - -class temp_file(object): - def __init__(self, suffix="", dir=None): - self.file = make_temp_file(suffix=suffix, dir=dir) - - def __enter__(self): - return self.file - - def __exit__(self, exc_type, exc_value, traceback): - remove(self.file, quiet=True) - -class temp_dir(object): - def __init__(self, suffix="", dir=None): - self.dir = make_temp_dir(suffix=suffix, dir=dir) - - def __enter__(self): - return self.dir - - def __exit__(self, exc_type, exc_value, traceback): - remove(self.dir, quiet=True) - -## Time operations - -def sleep(seconds, quiet=False): - _info(quiet, "Sleeping for {0} 
{1}", seconds, plural("second", seconds)) - - _time.sleep(seconds) - -def get_time(): - return _time.time() - -def format_duration(duration, align=False): - assert duration >= 0 - - if duration >= 3600: - value = duration / 3600 - unit = "h" - elif duration >= 5 * 60: - value = duration / 60 - unit = "m" - else: - value = duration - unit = "s" - - if align: - return "{0:.1f}{1}".format(value, unit) - elif value > 10: - return "{0:.0f}{1}".format(value, unit) - else: - return remove_suffix("{0:.1f}".format(value), ".0") + unit - -class Timer(object): - def __init__(self, timeout=None, timeout_message=None): - self.timeout = timeout - self.timeout_message = timeout_message - - self.start_time = None - self.stop_time = None - - def start(self): - self.start_time = get_time() - - if self.timeout is not None: - self.prev_handler = _signal.signal(_signal.SIGALRM, self.raise_timeout) - self.prev_timeout, prev_interval = _signal.setitimer(_signal.ITIMER_REAL, self.timeout) - self.prev_timer_suspend_time = get_time() - - assert prev_interval == 0.0, "This case is not yet handled" - - def stop(self): - self.stop_time = get_time() - - if self.timeout is not None: - assert get_time() - self.prev_timer_suspend_time > 0, "This case is not yet handled" - - _signal.signal(_signal.SIGALRM, self.prev_handler) - _signal.setitimer(_signal.ITIMER_REAL, self.prev_timeout) - - def __enter__(self): - self.start() - return self - - def __exit__(self, exc_type, exc_value, traceback): - self.stop() - - @property - def elapsed_time(self): - assert self.start_time is not None - - if self.stop_time is None: - return get_time() - self.start_time - else: - return self.stop_time - self.start_time - - def raise_timeout(self, *args): - raise PlanoTimeout(self.timeout_message) - -## Unique ID operations - -# Length in bytes, renders twice as long in hex -def get_unique_id(bytes=16): - assert bytes >= 1 - assert bytes <= 16 - - uuid_bytes = _uuid.uuid4().bytes - uuid_bytes = uuid_bytes[:bytes] - - 
return _binascii.hexlify(uuid_bytes).decode("utf-8") - -## Value operations - -def nvl(value, replacement): - if value is None: - return replacement - - return value - -def is_string(value): - return isinstance(value, str) - -def is_scalar(value): - return value is None or isinstance(value, (str, int, float, complex, bool)) - -def is_empty(value): - return value in (None, "", (), [], {}) - -def pformat(value): - return _pprint.pformat(value, width=120) - -def format_empty(value, replacement): - if is_empty(value): - value = replacement - - return value - -def format_not_empty(value, template=None): - if not is_empty(value) and template is not None: - value = template.format(value) - - return value - -def format_repr(obj, limit=None): - attrs = ["{0}={1}".format(k, repr(v)) for k, v in obj.__dict__.items()] - return "{0}({1})".format(obj.__class__.__name__, ", ".join(attrs[:limit])) - -class Namespace(object): - def __init__(self, **kwargs): - for name in kwargs: - setattr(self, name, kwargs[name]) - - def __eq__(self, other): - return vars(self) == vars(other) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return format_repr(self) - -## YAML operations - -def read_yaml(file): - import yaml as _yaml - - with _codecs.open(file, encoding="utf-8", mode="r") as f: - return _yaml.safe_load(f) - -def write_yaml(file, data): - import yaml as _yaml - - make_parent_dir(file, quiet=True) - - with _codecs.open(file, encoding="utf-8", mode="w") as f: - _yaml.safe_dump(data, f) - - return file - -def parse_yaml(yaml): - import yaml as _yaml - return _yaml.safe_load(yaml) - -def emit_yaml(data): - import yaml as _yaml - return _yaml.safe_dump(data) - -## Test operations - -def test(_function=None, name=None, timeout=None, disabled=False): - class Test(object): - def __init__(self, function): - self.function = function - self.name = nvl(name, self.function.__name__) - self.timeout = timeout - self.disabled = disabled - - self.module = 
_inspect.getmodule(self.function) - - if not hasattr(self.module, "_plano_tests"): - self.module._plano_tests = list() - - self.module._plano_tests.append(self) - - def __call__(self, test_run): - try: - self.function() - except SystemExit as e: - error(e) - raise PlanoError("System exit with code {0}".format(e)) - - def __repr__(self): - return "test '{0}:{1}'".format(self.module.__name__, self.name) - - if _function is None: - return Test - else: - return Test(_function) - -def print_tests(modules): - if _inspect.ismodule(modules): - modules = (modules,) - - for module in modules: - for test in module._plano_tests: - print(test) - -def run_tests(modules, include="*", exclude=(), enable=(), test_timeout=300, fail_fast=False, verbose=False, quiet=False): - if _inspect.ismodule(modules): - modules = (modules,) - - if is_string(include): - include = (include,) - - if is_string(exclude): - exclude = (exclude,) - - if is_string(enable): - enable = (enable,) - - test_run = TestRun(test_timeout=test_timeout, fail_fast=fail_fast, verbose=verbose, quiet=quiet) - - if verbose: - notice("Starting {0}", test_run) - elif not quiet: - cprint("=== Configuration ===", color="cyan") - - props = ( - ("Modules", format_empty(", ".join([x.__name__ for x in modules]), "[none]")), - ("Test timeout", format_duration(test_timeout)), - ("Fail fast", fail_fast), - ) - - print_properties(props) - print() - - for module in modules: - if verbose: - notice("Running tests from module {0} (file {1})", repr(module.__name__), repr(module.__file__)) - elif not quiet: - cprint("=== Module {} ===".format(repr(module.__name__)), color="cyan") - - if not hasattr(module, "_plano_tests"): - warn("Module {0} has no tests", repr(module.__name__)) - continue - - for test in module._plano_tests: - included = any([_fnmatch.fnmatchcase(test.name, x) for x in include]) - excluded = any([_fnmatch.fnmatchcase(test.name, x) for x in exclude]) - disabled = test.disabled and not any([_fnmatch.fnmatchcase(test.name, 
x) for x in enable]) - - if included and not excluded and not disabled: - test_run.tests.append(test) - _run_test(test_run, test) - - if not verbose and not quiet: - print() - - total = len(test_run.tests) - skipped = len(test_run.skipped_tests) - failed = len(test_run.failed_tests) - - if total == 0: - raise PlanoError("No tests ran") - - if failed == 0: - result_message = "All tests passed ({0} skipped)".format(skipped) - else: - result_message = "{0} {1} failed ({2} skipped)".format(failed, plural("test", failed), skipped) - - if verbose: - if failed == 0: - notice(result_message) - else: - error(result_message) - elif not quiet: - cprint("=== Summary ===", color="cyan") - - props = ( - ("Total", total), - ("Skipped", skipped, format_not_empty(", ".join([x.name for x in test_run.skipped_tests]), "({0})")), - ("Failed", failed, format_not_empty(", ".join([x.name for x in test_run.failed_tests]), "({0})")), - ) - - print_properties(props) - print() - - cprint("=== RESULT ===", color="cyan") - - if failed == 0: - cprint(result_message, color="green") - else: - cprint(result_message, color="red", bright="True") - - print() - - if failed != 0: - raise PlanoError(result_message) - -def _run_test(test_run, test): - if test_run.verbose: - notice("Running {0}", test) - elif not test_run.quiet: - print("{0:.<72} ".format(test.name + " "), end="") - - timeout = nvl(test.timeout, test_run.test_timeout) - - with temp_file() as output_file: - try: - with Timer(timeout=timeout) as timer: - if test_run.verbose: - test(test_run) - else: - with output_redirected(output_file, quiet=True): - test(test_run) - except KeyboardInterrupt: - raise - except PlanoTestSkipped as e: - test_run.skipped_tests.append(test) - - if test_run.verbose: - notice("{0} SKIPPED ({1})", test, format_duration(timer.elapsed_time)) - elif not test_run.quiet: - _print_test_result("SKIPPED", timer, "yellow") - print("Reason: {0}".format(str(e))) - except Exception as e: - test_run.failed_tests.append(test) - 
- if test_run.verbose: - _traceback.print_exc() - - if isinstance(e, PlanoTimeout): - error("{0} **FAILED** (TIMEOUT) ({1})", test, format_duration(timer.elapsed_time)) - else: - error("{0} **FAILED** ({1})", test, format_duration(timer.elapsed_time)) - elif not test_run.quiet: - if isinstance(e, PlanoTimeout): - _print_test_result("**FAILED** (TIMEOUT)", timer, color="red", bright=True) - else: - _print_test_result("**FAILED**", timer, color="red", bright=True) - - _print_test_error(e) - _print_test_output(output_file) - - if test_run.fail_fast: - return True - else: - test_run.passed_tests.append(test) - - if test_run.verbose: - notice("{0} PASSED ({1})", test, format_duration(timer.elapsed_time)) - elif not test_run.quiet: - _print_test_result("PASSED", timer) - -def _print_test_result(status, timer, color="white", bright=False): - cprint("{0:<7}".format(status), color=color, bright=bright, end="") - print("{0:>6}".format(format_duration(timer.elapsed_time, align=True))) - -def _print_test_error(e): - cprint("--- Error ---", color="yellow") - - if isinstance(e, PlanoProcessError): - print("> {0}".format(str(e))) - else: - lines = _traceback.format_exc().rstrip().split("\n") - lines = ["> {0}".format(x) for x in lines] - - print("\n".join(lines)) - -def _print_test_output(output_file): - if get_file_size(output_file) == 0: - return - - cprint("--- Output ---", color="yellow") - - with open(output_file, "r") as out: - for line in out: - print("> {0}".format(line), end="") - -class TestRun(object): - def __init__(self, test_timeout=None, fail_fast=False, verbose=False, quiet=False): - self.test_timeout = test_timeout - self.fail_fast = fail_fast - self.verbose = verbose - self.quiet = quiet - - self.tests = list() - self.skipped_tests = list() - self.failed_tests = list() - self.passed_tests = list() - - def __repr__(self): - return format_repr(self) - -class expect_exception(object): - def __init__(self, exception_type=Exception, contains=None): - 
self.exception_type = exception_type - self.contains = contains - - def __enter__(self): - pass - - def __exit__(self, exc_type, exc_value, traceback): - if exc_value is None: - assert False, "Never encountered expected exception {0}".format(self.exception_type.__name__) - - if self.contains is None: - return isinstance(exc_value, self.exception_type) - else: - return isinstance(exc_value, self.exception_type) and self.contains in str(exc_value) - -class expect_error(expect_exception): - def __init__(self, contains=None): - super(expect_error, self).__init__(PlanoError, contains=contains) - -class expect_timeout(expect_exception): - def __init__(self, contains=None): - super(expect_timeout, self).__init__(PlanoTimeout, contains=contains) - -class expect_system_exit(expect_exception): - def __init__(self, contains=None): - super(expect_system_exit, self).__init__(SystemExit, contains=contains) - -class expect_output(temp_file): - def __init__(self, equals=None, contains=None, startswith=None, endswith=None): - super(expect_output, self).__init__() - self.equals = equals - self.contains = contains - self.startswith = startswith - self.endswith = endswith - - def __exit__(self, exc_type, exc_value, traceback): - result = read(self.file) - - if self.equals is None: - assert len(result) > 0, result - else: - assert result == self.equals, result - - if self.contains is not None: - assert self.contains in result, result - - if self.startswith is not None: - assert result.startswith(self.startswith), result - - if self.endswith is not None: - assert result.endswith(self.endswith), result - - super(expect_output, self).__exit__(exc_type, exc_value, traceback) - -class PlanoTestCommand(BaseCommand): - def __init__(self, test_modules=[]): - super(PlanoTestCommand, self).__init__() - - self.test_modules = test_modules - - if _inspect.ismodule(self.test_modules): - self.test_modules = [self.test_modules] - - self.parser = BaseArgumentParser() - 
self.parser.add_argument("include", metavar="PATTERN", nargs="*", default=["*"], - help="Run only tests with names matching PATTERN. This option can be repeated.") - self.parser.add_argument("-e", "--exclude", metavar="PATTERN", action="append", default=[], - help="Do not run tests with names matching PATTERN. This option can be repeated.") - self.parser.add_argument("-m", "--module", action="append", default=[], - help="Load tests from MODULE. This option can be repeated.") - self.parser.add_argument("-l", "--list", action="store_true", - help="Print the test names and exit") - self.parser.add_argument("--enable", metavar="PATTERN", action="append", default=[], - help="Enable disabled tests matching PATTERN. This option can be repeated.") - self.parser.add_argument("--timeout", metavar="SECONDS", type=int, default=300, - help="Fail any test running longer than SECONDS (default 300)") - self.parser.add_argument("--fail-fast", action="store_true", - help="Exit on the first failure encountered in a test run") - self.parser.add_argument("--iterations", metavar="COUNT", type=int, default=1, - help="Run the tests COUNT times (default 1)") - - def parse_args(self, args): - return self.parser.parse_args(args) - - def init(self, args): - self.list_only = args.list - self.include_patterns = args.include - self.exclude_patterns = args.exclude - self.enable_patterns = args.enable - self.timeout = args.timeout - self.fail_fast = args.fail_fast - self.iterations = args.iterations - - try: - for name in args.module: - self.test_modules.append(_import_module(name)) - except ImportError as e: - raise PlanoError(e) - - def run(self): - if self.list_only: - print_tests(self.test_modules) - return - - for i in range(self.iterations): - run_tests(self.test_modules, include=self.include_patterns, exclude=self.exclude_patterns, enable=self.enable_patterns, - test_timeout=self.timeout, fail_fast=self.fail_fast, verbose=self.verbose, quiet=self.quiet) - -## Plano command operations - 
-_command_help = { - "build": "Build artifacts from source", - "clean": "Clean up the source tree", - "dist": "Generate distribution artifacts", - "install": "Install the built artifacts on your system", - "test": "Run the tests", -} - -def command(_function=None, name=None, args=None, parent=None): - class Command(object): - def __init__(self, function): - self.function = function - self.module = _inspect.getmodule(self.function) - - self.name = name - self.args = args - self.parent = parent - - if self.parent is None: - self.name = nvl(self.name, function.__name__.rstrip("_").replace("_", "-")) - self.args = self.process_args(self.args) - else: - self.name = nvl(self.name, self.parent.name) - self.args = nvl(self.args, self.parent.args) - - doc = _inspect.getdoc(self.function) - - if doc is None: - self.help = _command_help.get(self.name) - self.description = self.help - else: - self.help = doc.split("\n")[0] - self.description = doc - - if self.parent is not None: - self.help = nvl(self.help, self.parent.help) - self.description = nvl(self.description, self.parent.description) - - debug("Defining {0}", self) - - for arg in self.args.values(): - debug(" {0}", str(arg).capitalize()) - - def __repr__(self): - return "command '{0}:{1}'".format(self.module.__name__, self.name) - - def process_args(self, input_args): - sig = _inspect.signature(self.function) - params = list(sig.parameters.values()) - input_args = {x.name: x for x in nvl(input_args, ())} - output_args = _collections.OrderedDict() - - try: - app_param = params.pop(0) - except IndexError: - raise PlanoError("The function for {0} is missing the required 'app' parameter".format(self)) - else: - if app_param.name != "app": - raise PlanoError("The function for {0} is missing the required 'app' parameter".format(self)) - - for param in params: - try: - arg = input_args[param.name] - except KeyError: - arg = CommandArgument(param.name) - - if param.kind is param.POSITIONAL_ONLY: # pragma: nocover - if 
arg.positional is None: - arg.positional = True - elif param.kind is param.POSITIONAL_OR_KEYWORD and param.default is param.empty: - if arg.positional is None: - arg.positional = True - elif param.kind is param.POSITIONAL_OR_KEYWORD and param.default is not param.empty: - arg.optional = True - arg.default = param.default - elif param.kind is param.VAR_POSITIONAL: - if arg.positional is None: - arg.positional = True - arg.multiple = True - elif param.kind is param.VAR_KEYWORD: - continue - elif param.kind is param.KEYWORD_ONLY: - arg.optional = True - arg.default = param.default - else: # pragma: nocover - raise NotImplementedError(param.kind) - - if arg.type is None and arg.default not in (None, False): # XXX why false? - arg.type = type(arg.default) - - output_args[arg.name] = arg - - return output_args - - def __call__(self, app, *args, **kwargs): - assert isinstance(app, PlanoCommand), app - - command = app.bound_commands[self.name] - - if command is not self: - command(app, *args, **kwargs) - return - - debug("Running {0} {1} {2}".format(self, args, kwargs)) - - app.running_commands.append(self) - - dashes = "--" * len(app.running_commands) - display_args = list(self.get_display_args(args, kwargs)) - - with console_color("magenta", file=_sys.stderr): - eprint("{0}> {1}".format(dashes, self.name), end="") - - if display_args: - eprint(" ({0})".format(", ".join(display_args)), end="") - - eprint() - - self.function(app, *args, **kwargs) - - cprint("<{0} {1}".format(dashes, self.name), color="magenta", file=_sys.stderr) - - app.running_commands.pop() - - if app.running_commands: - name = app.running_commands[-1].name - - cprint("{0}| {1}".format(dashes[:-2], name), color="magenta", file=_sys.stderr) - - def super(self, app, *args, **kwargs): - assert isinstance(app, PlanoCommand), app - - if self.parent is None: - raise PlanoError("You called super() in a command with no parent ({0})".format(self)) - - self.parent.function(app, *args, **kwargs) - - def 
get_display_args(self, args, kwargs): - for i, arg in enumerate(self.args.values()): - if arg.positional: - if arg.multiple: - for va in args[i:]: - yield repr(va) - elif arg.optional: - value = args[i] - - if value == arg.default: - continue - - yield repr(value) - else: - yield repr(args[i]) - else: - value = kwargs.get(arg.name, arg.default) - - if value == arg.default: - continue - - if value in (True, False): - value = str(value).lower() - else: - value = repr(value) - - yield "{0}={1}".format(arg.display_name, value) - - if _function is None: - return Command - else: - return Command(_function) - -class CommandArgument(object): - def __init__(self, name, display_name=None, type=None, metavar=None, help=None, short_option=None, default=None, positional=None): - self.name = name - self.display_name = nvl(display_name, self.name.replace("_", "-")) - self.type = type - self.metavar = nvl(metavar, self.display_name.upper()) - self.help = help - self.short_option = short_option - self.default = default - self.positional = positional - - self.optional = False - self.multiple = False - - def __repr__(self): - return "argument '{0}' (default {1})".format(self.name, repr(self.default)) - -class PlanoCommand(BaseCommand): - def __init__(self, planofile=None): - self.planofile = planofile - - description = "Run commands defined as Python functions" - - self.pre_parser = BaseArgumentParser(description=description, add_help=False) - self.pre_parser.add_argument("-h", "--help", action="store_true", - help="Show this help message and exit") - - if self.planofile is None: - self.pre_parser.add_argument("-f", "--file", - help="Load commands from FILE (default 'Planofile' or '.planofile')") - - self.parser = _argparse.ArgumentParser(parents=(self.pre_parser,), add_help=False, allow_abbrev=False) - - self.bound_commands = _collections.OrderedDict() - self.running_commands = list() - - self.default_command_name = None - self.default_command_args = None - 
self.default_command_kwargs = None - - # def bind_commands(self, module): - # self._bind_commands(vars(module)) - - def set_default_command(self, name, *args, **kwargs): - self.default_command_name = name - self.default_command_args = args - self.default_command_kwargs = kwargs - - def parse_args(self, args): - pre_args, _ = self.pre_parser.parse_known_args(args) - - self._load_config(getattr(pre_args, "file", None)) - self._process_commands() - - return self.parser.parse_args(args) - - def init(self, args): - # XXX Can this move to the top of run? - if args.help or args.command is None and self.default_command_name is None: - self.parser.print_help() - self.init_only = True - return - - if args.command is None: - self.selected_command = self.bound_commands[self.default_command_name] - self.command_args = self.default_command_args - self.command_kwargs = self.default_command_kwargs - else: - self.selected_command = self.bound_commands[args.command] - self.command_args = list() - self.command_kwargs = dict() - - for arg in self.selected_command.args.values(): - if arg.positional: - if arg.multiple: - self.command_args.extend(getattr(args, arg.name)) - else: - self.command_args.append(getattr(args, arg.name)) - else: - self.command_kwargs[arg.name] = getattr(args, arg.name) - - def run(self): - with Timer() as timer: - self.selected_command(self, *self.command_args, **self.command_kwargs) - - cprint("OK", color="green", file=_sys.stderr, end="") - cprint(" ({0})".format(format_duration(timer.elapsed_time)), color="magenta", file=_sys.stderr) - - def _bind_commands(self, scope): - for var in scope.values(): - if callable(var) and var.__class__.__name__ == "Command": - self.bound_commands[var.name] = var - - def _load_config(self, planofile): - if planofile is None: - planofile = self.planofile - - if planofile is not None and is_dir(planofile): - planofile = self._find_planofile(planofile) - - if planofile is not None and not is_file(planofile): - exit("Planofile 
'{0}' not found", planofile) - - if planofile is None: - planofile = self._find_planofile(get_current_dir()) - - if planofile is None: - return - - debug("Loading '{0}'", planofile) - - _sys.path.insert(0, join(get_parent_dir(planofile), "python")) - - scope = dict(globals()) - scope["app"] = self - - try: - with open(planofile) as f: - exec(f.read(), scope) - except Exception as e: - error(e) - exit("Failure loading {0}: {1}", repr(planofile), str(e)) - - self._bind_commands(scope) - - def _find_planofile(self, dir): - for name in ("Planofile", ".planofile"): - path = join(dir, name) - - if is_file(path): - return path - - def _process_commands(self): - subparsers = self.parser.add_subparsers(title="commands", dest="command") - - for command in self.bound_commands.values(): - subparser = subparsers.add_parser(command.name, help=command.help, - description=nvl(command.description, command.help), - formatter_class=_argparse.RawDescriptionHelpFormatter) - - for arg in command.args.values(): - if arg.positional: - if arg.multiple: - subparser.add_argument(arg.name, metavar=arg.metavar, type=arg.type, help=arg.help, nargs="*") - elif arg.optional: - subparser.add_argument(arg.name, metavar=arg.metavar, type=arg.type, help=arg.help, nargs="?", default=arg.default) - else: - subparser.add_argument(arg.name, metavar=arg.metavar, type=arg.type, help=arg.help) - else: - flag_args = list() - - if arg.short_option is not None: - flag_args.append("-{0}".format(arg.short_option)) - - flag_args.append("--{0}".format(arg.display_name)) - - help = arg.help - - if arg.default not in (None, False): - if help is None: - help = "Default value is {0}".format(repr(arg.default)) - else: - help += " (default {0})".format(repr(arg.default)) - - if arg.default is False: - subparser.add_argument(*flag_args, dest=arg.name, default=arg.default, action="store_true", help=help) - else: - subparser.add_argument(*flag_args, dest=arg.name, default=arg.default, metavar=arg.metavar, type=arg.type, 
help=help) - - _capitalize_help(subparser) - -## Plano shell operations - -class PlanoShellCommand(BaseCommand): - def __init__(self): - self.parser = BaseArgumentParser() - self.parser.add_argument("file", metavar="FILE", nargs="?", - help="Read program from FILE") - self.parser.add_argument("arg", metavar="ARG", nargs="*", - help="Program arguments") - self.parser.add_argument("-c", "--command", - help="A program passed in as a string") - self.parser.add_argument("-i", "--interactive", action="store_true", - help="Operate interactively after running the program (if any)") - - def parse_args(self, args): - return self.parser.parse_args(args) - - def init(self, args): - self.file = args.file - self.interactive = args.interactive - self.command = args.command - - def run(self): - stdin_isatty = _os.isatty(_sys.stdin.fileno()) - script = None - - if self.file == "-": # pragma: nocover - script = _sys.stdin.read() - elif self.file is not None: - try: - with open(self.file) as f: - script = f.read() - except IOError as e: - raise PlanoError(e) - elif not stdin_isatty: # pragma: nocover - # Stdin is a pipe - script = _sys.stdin.read() - - if self.command is not None: - exec(self.command, globals()) - - if script is not None: - global ARGS - ARGS = ARGS[1:] - - exec(script, globals()) - - if (self.command is None and self.file is None and stdin_isatty) or self.interactive: # pragma: nocover - _code.InteractiveConsole(locals=globals()).interact() - -if PLANO_DEBUG: # pragma: nocover - enable_logging(level="debug") - -if __name__ == "__main__": # pragma: nocover - PlanoCommand().main() diff --git a/subrepos/skewer/subrepos/plano/scripts/devel.sh b/subrepos/skewer/subrepos/plano/scripts/devel.sh deleted file mode 100644 index 88b563c..0000000 --- a/subrepos/skewer/subrepos/plano/scripts/devel.sh +++ /dev/null @@ -1,2 +0,0 @@ -export PATH=$(echo $PWD/build/scripts-*):$PATH -export PYTHONPATH=$PWD/build/lib:$PWD/python diff --git 
a/subrepos/skewer/subrepos/plano/scripts/test-bootstrap.dockerfile b/subrepos/skewer/subrepos/plano/scripts/test-bootstrap.dockerfile deleted file mode 100644 index f6e0416..0000000 --- a/subrepos/skewer/subrepos/plano/scripts/test-bootstrap.dockerfile +++ /dev/null @@ -1,46 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# - -FROM fedora - -RUN dnf -qy update && dnf -q clean all - -RUN dnf -y install python git - -COPY test-project /root/test-project -COPY bin/plano /root/test-project/plano - -WORKDIR /root/test-project -RUN git init - -RUN mkdir /root/test-project/modules - -WORKDIR /root/test-project/modules -RUN git submodule add https://github.com/ssorj/plano.git - -WORKDIR /root/test-project/python -RUN ln -s ../modules/plano/python/plano.py -RUN ln -s ../modules/plano/python/bullseye.py - -WORKDIR /root/test-project -RUN ./plano || : - -RUN git submodule update --init - -CMD ["./plano"] diff --git a/subrepos/skewer/subrepos/plano/setup.py b/subrepos/skewer/subrepos/plano/setup.py deleted file mode 100755 index ac3c8ce..0000000 --- a/subrepos/skewer/subrepos/plano/setup.py +++ /dev/null @@ -1,80 +0,0 @@ -#!/usr/bin/python3 -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# - -import collections -import os -import tempfile - -from distutils.core import setup -from distutils.command.build_scripts import build_scripts -from distutils.file_util import copy_file - -class _build_scripts(build_scripts): - def run(self): - try: - prefix = self.distribution.command_options["install"]["prefix"][1] - except KeyError: - try: - self.distribution.command_options["install"]["user"] - except KeyError: - prefix = "/usr/local" - else: - prefix = os.path.join(os.path.expanduser("~"), ".local") - - temp_dir = tempfile.mkdtemp() - default_home = os.path.join(prefix, "lib", "plano") - - for name in os.listdir("bin"): - if name.endswith(".in"): - in_path = os.path.join("bin", name) - out_path = os.path.join(temp_dir, name[:-3]) - - content = open(in_path).read() - content = content.replace("@default_home@", default_home) - open(out_path, "w").write(content) - - self.scripts.remove(in_path) - self.scripts.append(out_path) - - super(_build_scripts, self).run() - -def find_data_files(dir, output_prefix): - data_files = collections.defaultdict(list) - - for root, dirs, files in os.walk(dir): - for name in files: - data_files[os.path.join(output_prefix, root)].append(os.path.join(root, name)) - - return [(k, v) for k, v in data_files.items()] - -setup(name="plano", - version="1.0.0-SNAPSHOT", - url="https://github.com/ssorj/plano", - author="Justin Ross", - author_email="justin.ross@gmail.com", - cmdclass={'build_scripts': _build_scripts}, - py_modules=["plano"], - package_dir={"": "python"}, - data_files=[("lib/plano/python", ["python/plano_tests.py", - "python/bullseye.py", - "python/bullseye.strings", - "python/bullseye_tests.py"]), - *find_data_files("test-project", "lib/plano")], - scripts=["bin/plano", "bin/planosh", "bin/planotest", "bin/plano-self-test.in"]) diff --git a/subrepos/skewer/subrepos/plano/test-project/Planofile b/subrepos/skewer/subrepos/plano/test-project/Planofile deleted file mode 100644 index c041efa..0000000 --- 
a/subrepos/skewer/subrepos/plano/test-project/Planofile +++ /dev/null @@ -1,75 +0,0 @@ -from bullseye import * - -app.set_default_command("build", prefix="/tmp/alpha") - -project.name = "chucker" -project.data_dirs = ["files"] -project.excluded_modules = ["flipper"] -project.test_modules = ["chucker_tests"] - -result_file = "build/result.json" - -@command(parent=build) -def build(app, *args, **kwargs): - build.super(app, *args, **kwargs) - - notice("Extended building") - - data = {"built": True} - write_json(result_file, data) - -@command(parent=test) -def test(app, *args, **kwargs): - test.super(app, *args, **kwargs) - - notice("Extended testing") - - check_file(result_file) - - if exists(result_file): - data = read_json(result_file) - data["tested"] = True - write_json(result_file, data) - -@command(parent=install) -def install(app, *args, **kwargs): - install.super(app, *args, **kwargs) - - notice("Extended installing") - - data = read_json(result_file) - data["installed"] = True - write_json(result_file, data) - -@command -def base_command(app, alpha, beta, omega="x"): - print("base", alpha, beta, omega) - -@command(name="extended-command", parent=base_command) -def extended_command(app, alpha, beta, omega="y"): - print("extended", alpha, omega) - extended_command.super(app, alpha, beta, omega) - -@command(args=(CommandArgument("message_", help="The message to print", display_name="message"), - CommandArgument("count", help="Print the message COUNT times"), - CommandArgument("extra", default=1, short_option="e"))) -def echo(app, message_, count=1, extra=None, trouble=False): - """Print a message to the console""" - - print("Echoing (message={0}, count={1})".format(message_, count)) - - if trouble: - raise Exception("Trouble") - - for i in range(count): - print(message_) - -@command -def haberdash(app, first, *middle, last="bowler"): - data = [first, *middle, last] - write_json("haberdash.json", data) - -@command(args=(CommandArgument("optional", 
positional=True),)) -def balderdash(app, required, optional="malarkey", other="rubbish"): - data = [required, optional, other] - write_json("balderdash.json", data) diff --git a/subrepos/skewer/subrepos/plano/test-project/bin/chucker.in b/subrepos/skewer/subrepos/plano/test-project/bin/chucker.in deleted file mode 100644 index f338f8a..0000000 --- a/subrepos/skewer/subrepos/plano/test-project/bin/chucker.in +++ /dev/null @@ -1 +0,0 @@ -@default_home@ diff --git a/subrepos/skewer/subrepos/plano/test-project/files/notes.txt b/subrepos/skewer/subrepos/plano/test-project/files/notes.txt deleted file mode 100644 index e69de29..0000000 diff --git a/subrepos/skewer/subrepos/plano/test-project/python/chucker.py b/subrepos/skewer/subrepos/plano/test-project/python/chucker.py deleted file mode 100644 index e69de29..0000000 diff --git a/subrepos/skewer/subrepos/plano/test-project/python/chucker_tests.py b/subrepos/skewer/subrepos/plano/test-project/python/chucker_tests.py deleted file mode 100644 index 95a1c6b..0000000 --- a/subrepos/skewer/subrepos/plano/test-project/python/chucker_tests.py +++ /dev/null @@ -1,35 +0,0 @@ -from plano import * - -@test -def test_hello(): - print("Hello") - -@test -def test_goodbye(): - print("Goodbye") - -@test(disabled=True) -def test_badbye(): - print("Badbye") - assert False - -@test -def test_skipped(): - raise PlanoTestSkipped("Test coverage") - -@test(disabled=True) -def test_keyboard_interrupt(): - raise KeyboardInterrupt() - -@test(disabled=True, timeout=0.05) -def test_timeout(): - sleep(10, quiet=True) - assert False - -@test(disabled=True) -def test_process_error(): - run("expr 1 / 0") - -@test(disabled=True) -def test_system_exit(): - exit(1) diff --git a/subrepos/skewer/subrepos/plano/test-project/python/flipper.py b/subrepos/skewer/subrepos/plano/test-project/python/flipper.py deleted file mode 100644 index e69de29..0000000 diff --git a/subrepos/skewer/test-example/.github/workflows/main.yaml 
b/subrepos/skewer/test-example/.github/workflows/main.yaml deleted file mode 100644 index e32360c..0000000 --- a/subrepos/skewer/test-example/.github/workflows/main.yaml +++ /dev/null @@ -1,22 +0,0 @@ -name: main -on: - push: - pull_request: - schedule: - - cron: "0 0 * * 0" -jobs: - test: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - uses: actions/setup-python@v4 - with: - python-version: "3.x" - - uses: manusa/actions-setup-minikube@v2.6.0 - with: - minikube version: "v1.25.2" - kubernetes version: "v1.24.1" - github token: ${{ secrets.GITHUB_TOKEN }} - - run: curl -f https://skupper.io/install.sh | sh - - run: echo "$HOME/.local/bin" >> $GITHUB_PATH - - run: ./plano test diff --git a/subrepos/skewer/test-example/.planofile b/subrepos/skewer/test-example/.planofile deleted file mode 120000 index 46de17c..0000000 --- a/subrepos/skewer/test-example/.planofile +++ /dev/null @@ -1 +0,0 @@ -subrepos/skewer/config/.planofile \ No newline at end of file diff --git a/subrepos/skewer/test-example/README.md b/subrepos/skewer/test-example/README.md deleted file mode 100644 index 3222035..0000000 --- a/subrepos/skewer/test-example/README.md +++ /dev/null @@ -1,471 +0,0 @@ -# Skupper Hello World - -[![main](https://github.com/skupperproject/skewer/actions/workflows/main.yaml/badge.svg)](https://github.com/skupperproject/skewer/actions/workflows/main.yaml) - -#### A minimal HTTP application deployed across Kubernetes clusters using Skupper - - -This example is part of a [suite of examples][examples] showing the -different ways you can use [Skupper][website] to connect services -across cloud providers, data centers, and edge sites. 
- -[website]: https://skupper.io/ -[examples]: https://skupper.io/examples/index.html - - -#### Contents - -* [Overview](#overview) -* [Prerequisites](#prerequisites) -* [Step 1: Configure separate console sessions](#step-1-configure-separate-console-sessions) -* [Step 2: Access your clusters](#step-2-access-your-clusters) -* [Step 3: Set up your namespaces](#step-3-set-up-your-namespaces) -* [Step 4: Install Skupper in your namespaces](#step-4-install-skupper-in-your-namespaces) -* [Step 5: Check the status of your namespaces](#step-5-check-the-status-of-your-namespaces) -* [Step 6: Link your namespaces](#step-6-link-your-namespaces) -* [Step 7: Deploy the frontend and backend services](#step-7-deploy-the-frontend-and-backend-services) -* [Step 8: Expose the backend service](#step-8-expose-the-backend-service) -* [Step 9: Expose the frontend service](#step-9-expose-the-frontend-service) -* [Step 10: Test the application](#step-10-test-the-application) -* [Accessing the web console](#accessing-the-web-console) -* [Cleaning up](#cleaning-up) -* [Summary](#summary) - -## Overview - -This example is a very simple multi-service HTTP application that can -be deployed across multiple Kubernetes clusters using Skupper. - -It contains two services: - -* A backend service that exposes an `/api/hello` endpoint. It - returns greetings of the form `Hi, . I am - ()`. - -* A frontend service that sends greetings to the backend and - fetches new greetings in response. - -With Skupper, you can place the backend in one cluster and the -frontend in another and maintain connectivity between the two -services without exposing the backend to the public internet. 
- - - -## Prerequisites - - -* The `kubectl` command-line tool, version 1.15 or later - ([installation guide][install-kubectl]) - -* The `skupper` command-line tool, the latest version ([installation - guide][install-skupper]) - -* Access to at least one Kubernetes cluster, from any provider you - choose - -[install-kubectl]: https://kubernetes.io/docs/tasks/tools/install-kubectl/ -[install-skupper]: https://skupper.io/install/index.html - - -## Step 1: Configure separate console sessions - -Skupper is designed for use with multiple namespaces, typically on -different clusters. The `skupper` command uses your -[kubeconfig][kubeconfig] and current context to select the -namespace where it operates. - -[kubeconfig]: https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/ - -Your kubeconfig is stored in a file in your home directory. The -`skupper` and `kubectl` commands use the `KUBECONFIG` environment -variable to locate it. - -A single kubeconfig supports only one active context per user. -Since you will be using multiple contexts at once in this -exercise, you need to create distinct kubeconfigs. - -Start a console session for each of your namespaces. Set the -`KUBECONFIG` environment variable to a different path in each -session. - -_**Console for west:**_ - -~~~ shell -export KUBECONFIG=~/.kube/config-west -~~~ - -_**Console for east:**_ - -~~~ shell -export KUBECONFIG=~/.kube/config-east -~~~ - -## Step 2: Access your clusters - -The methods for accessing your clusters vary by Kubernetes -provider. Find the instructions for your chosen providers and use -them to authenticate and configure access for each console -session. 
See the following links for more information: - -* [Minikube](https://skupper.io/start/minikube.html) -* [Amazon Elastic Kubernetes Service (EKS)](https://skupper.io/start/eks.html) -* [Azure Kubernetes Service (AKS)](https://skupper.io/start/aks.html) -* [Google Kubernetes Engine (GKE)](https://skupper.io/start/gke.html) -* [IBM Kubernetes Service](https://skupper.io/start/ibmks.html) -* [OpenShift](https://skupper.io/start/openshift.html) -* [More providers](https://kubernetes.io/partners/#kcsp) - -## Step 3: Set up your namespaces - -Use `kubectl create namespace` to create the namespaces you wish -to use (or use existing namespaces). Use `kubectl config -set-context` to set the current namespace for each session. - -_**Console for west:**_ - -~~~ shell -kubectl create namespace west -kubectl config set-context --current --namespace west -~~~ - -_Sample output:_ - -~~~ console -$ kubectl create namespace west -namespace/west created - -$ kubectl config set-context --current --namespace west -Context "minikube" modified. -~~~ - -_**Console for east:**_ - -~~~ shell -kubectl create namespace east -kubectl config set-context --current --namespace east -~~~ - -_Sample output:_ - -~~~ console -$ kubectl create namespace east -namespace/east created - -$ kubectl config set-context --current --namespace east -Context "minikube" modified. -~~~ - -## Step 4: Install Skupper in your namespaces - -The `skupper init` command installs the Skupper router and service -controller in the current namespace. Run the `skupper init` command -in each namespace. - -**Note:** If you are using Minikube, [you need to start `minikube -tunnel`][minikube-tunnel] before you install Skupper. - -[minikube-tunnel]: https://skupper.io/start/minikube.html#running-minikube-tunnel - -_**Console for west:**_ - -~~~ shell -skupper init -~~~ - -_Sample output:_ - -~~~ console -$ skupper init -Waiting for LoadBalancer IP or hostname... -Skupper is now installed in namespace 'west'. 
Use 'skupper status' to get more information. -~~~ - -_**Console for east:**_ - -~~~ shell -skupper init -~~~ - -_Sample output:_ - -~~~ console -$ skupper init -Waiting for LoadBalancer IP or hostname... -Skupper is now installed in namespace 'east'. Use 'skupper status' to get more information. -~~~ - -## Step 5: Check the status of your namespaces - -Use `skupper status` in each console to check that Skupper is -installed. - -_**Console for west:**_ - -~~~ shell -skupper status -~~~ - -_Sample output:_ - -~~~ console -$ skupper status -Skupper is enabled for namespace "west" in interior mode. It is connected to 1 other site. It has 1 exposed service. -The site console url is: -The credentials for internal console-auth mode are held in secret: 'skupper-console-users' -~~~ - -_**Console for east:**_ - -~~~ shell -skupper status -~~~ - -_Sample output:_ - -~~~ console -$ skupper status -Skupper is enabled for namespace "east" in interior mode. It is connected to 1 other site. It has 1 exposed service. -The site console url is: -The credentials for internal console-auth mode are held in secret: 'skupper-console-users' -~~~ - -As you move through the steps below, you can use `skupper status` at -any time to check your progress. - -## Step 6: Link your namespaces - -Creating a link requires use of two `skupper` commands in -conjunction, `skupper token create` and `skupper link create`. - -The `skupper token create` command generates a secret token that -signifies permission to create a link. The token also carries the -link details. Then, in a remote namespace, The `skupper link -create` command uses the token to create a link to the namespace -that generated it. - -**Note:** The link token is truly a *secret*. Anyone who has the -token can link to your namespace. Make sure that only those you -trust have access to it. - -First, use `skupper token create` in one namespace to generate the -token. Then, use `skupper link create` in the other to create a -link. 
- -_**Console for west:**_ - -~~~ shell -skupper token create ~/secret.token -~~~ - -_Sample output:_ - -~~~ console -$ skupper token create ~/secret.token -Token written to ~/secret.token -~~~ - -_**Console for east:**_ - -~~~ shell -skupper link create ~/secret.token -~~~ - -_Sample output:_ - -~~~ console -$ skupper link create ~/secret.token -Site configured to link to https://10.105.193.154:8081/ed9c37f6-d78a-11ec-a8c7-04421a4c5042 (name=link1) -Check the status of the link using 'skupper link status'. -~~~ - -If your console sessions are on different machines, you may need -to use `sftp` or a similar tool to transfer the token securely. -By default, tokens expire after a single use or 15 minutes after -creation. - -## Step 7: Deploy the frontend and backend services - -Use `kubectl create deployment` to deploy the frontend service -in `west` and the backend service in `east`. - -_**Console for west:**_ - -~~~ shell -kubectl create deployment frontend --image quay.io/skupper/hello-world-frontend -~~~ - -_Sample output:_ - -~~~ console -$ kubectl create deployment frontend --image quay.io/skupper/hello-world-frontend -deployment.apps/frontend created -~~~ - -_**Console for east:**_ - -~~~ shell -kubectl create deployment backend --image quay.io/skupper/hello-world-backend --replicas 3 -~~~ - -_Sample output:_ - -~~~ console -$ kubectl create deployment backend --image quay.io/skupper/hello-world-backend --replicas 3 -deployment.apps/backend created -~~~ - -## Step 8: Expose the backend service - -We now have two namespaces linked to form a Skupper network, but -no services are exposed on it. Skupper uses the `skupper -expose` command to select a service from one namespace for -exposure on all the linked namespaces. - -Use `skupper expose` to expose the backend service to the -frontend service. 
- -_**Console for east:**_ - -~~~ shell -skupper expose deployment/backend --port 8080 -~~~ - -_Sample output:_ - -~~~ console -$ skupper expose deployment/backend --port 8080 -deployment backend exposed as backend -~~~ - -## Step 9: Expose the frontend service - -We have established connectivity between the two namespaces and -made the backend in `east` available to the frontend in `west`. -Before we can test the application, we need external access to -the frontend. - -Use `kubectl expose` with `--type LoadBalancer` to open network -access to the frontend service. - -_**Console for west:**_ - -~~~ shell -kubectl expose deployment/frontend --port 8080 --type LoadBalancer -~~~ - -_Sample output:_ - -~~~ console -$ kubectl expose deployment/frontend --port 8080 --type LoadBalancer -service/frontend exposed -~~~ - -## Step 10: Test the application - -Now we're ready to try it out. Use `kubectl get service/frontend` -to look up the external IP of the frontend service. Then use -`curl` or a similar tool to request the `/api/health` endpoint at -that address. - -**Note:** The `<external-ip>` field in the following commands is a -placeholder. The actual value is an IP address. - -_**Console for west:**_ - -~~~ shell -kubectl get service/frontend -curl http://<external-ip>:8080/api/health -~~~ - -_Sample output:_ - -~~~ console -$ kubectl get service/frontend -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -frontend LoadBalancer 10.103.232.28 <external-ip> 8080:30407/TCP 15s - -$ curl http://<external-ip>:8080/api/health -OK -~~~ - -If everything is in order, you can now access the web interface by -navigating to `http://<external-ip>:8080/` in your browser. - -## Accessing the web console - -Skupper includes a web console you can use to view the application -network. To access it, use `skupper status` to look up the URL of -the web console. Then use `kubectl get -secret/skupper-console-users` to look up the console admin -password. - -**Note:** The `<console-url>` and `<password>` fields in the -following output are placeholders.
The actual values are specific -to your environment. - -_**Console for west:**_ - -~~~ shell -skupper status -kubectl get secret/skupper-console-users -o jsonpath={.data.admin} | base64 -d -~~~ - -_Sample output:_ - -~~~ console -$ skupper status -Skupper is enabled for namespace "west" in interior mode. It is connected to 1 other site. It has 1 exposed service. -The site console url is: <console-url> -The credentials for internal console-auth mode are held in secret: 'skupper-console-users' - -$ kubectl get secret/skupper-console-users -o jsonpath={.data.admin} | base64 -d -<password> -~~~ - -Navigate to `<console-url>` in your browser. When prompted, log -in as user `admin` and enter the password. - -## Cleaning up - -To remove Skupper and the other resources from this exercise, use -the following commands. - -_**Console for west:**_ - -~~~ shell -skupper delete -kubectl delete service/frontend -kubectl delete deployment/frontend -~~~ - -_**Console for east:**_ - -~~~ shell -skupper delete -kubectl delete deployment/backend -~~~ - -## Summary - -This example locates the frontend and backend services in different -namespaces, on different clusters. Ordinarily, this means that they -have no way to communicate unless they are exposed to the public -internet. - -Introducing Skupper into each namespace allows us to create a virtual -application network that can connect services in different clusters. -Any service exposed on the application network is represented as a -local service in all of the linked namespaces. - -The backend service is located in `east`, but the frontend service -in `west` can "see" it as if it were local. When the frontend -sends a request to the backend, Skupper forwards the request to the -namespace where the backend is running and routes the response back to -the frontend. - - - -## Next steps - - -Check out the other [examples][examples] on the Skupper website.
diff --git a/subrepos/skewer/test-example/images/entities.svg b/subrepos/skewer/test-example/images/entities.svg deleted file mode 100644 index 6a1ab87..0000000 --- a/subrepos/skewer/test-example/images/entities.svg +++ /dev/null @@ -1,3 +0,0 @@ - - -
Frontend service
Frontend service
Skupper
Skupper
Kubernetes cluster 1
Kubernetes cluster 1
Namespace "west"
Namespace "west"
Namespace "east"
Namespace "east"
Kubernetes cluster 2
Kubernetes cluster 2
Backend service
Backend service
Skupper
Skupper
Public
network
Public<br/>network
diff --git a/subrepos/skewer/test-example/images/sequence.svg b/subrepos/skewer/test-example/images/sequence.svg deleted file mode 100644 index 20d27c1..0000000 --- a/subrepos/skewer/test-example/images/sequence.svg +++ /dev/null @@ -1 +0,0 @@ -westeastCurlFrontendSkupperSkupperBackendBackendGET /         GET /api/hello      GET /api/hello      GET /api/hello"Hello 1"      "Hello 1"      "Hello 1""Hello 1"          diff --git a/subrepos/skewer/test-example/images/sequence.txt b/subrepos/skewer/test-example/images/sequence.txt deleted file mode 100644 index 6d081ea..0000000 --- a/subrepos/skewer/test-example/images/sequence.txt +++ /dev/null @@ -1,22 +0,0 @@ -participant Curl - -participantgroup #cce5ff eu-north -participant Frontend -participant "Skupper" as Skupper1 #lightgreen -end - -participantgroup #ffe6cc us-east -participant "Skupper" as Skupper2 #lightgreen -participant Backend #yellow -end - -abox over Skupper1 #yellow: Backend - -Curl->Frontend: GET / -Frontend->Skupper1: GET /api/hello -Skupper1->Skupper2: GET /api/hello -Skupper2->Backend: GET /api/hello -Skupper2<-Backend: "Hello 1" -Skupper1<-Skupper2: "Hello 1" -Frontend<-Skupper1: "Hello 1" -Curl<-Frontend: "Hello 1" diff --git a/subrepos/skewer/test-example/plano b/subrepos/skewer/test-example/plano deleted file mode 120000 index 0f4ec84..0000000 --- a/subrepos/skewer/test-example/plano +++ /dev/null @@ -1 +0,0 @@ -subrepos/skewer/plano \ No newline at end of file diff --git a/subrepos/skewer/test-example/python/skewer.py b/subrepos/skewer/test-example/python/skewer.py deleted file mode 120000 index a5845bc..0000000 --- a/subrepos/skewer/test-example/python/skewer.py +++ /dev/null @@ -1 +0,0 @@ -../subrepos/skewer/python/skewer.py \ No newline at end of file diff --git a/subrepos/skewer/test-example/skewer.yaml b/subrepos/skewer/test-example/skewer.yaml deleted file mode 100644 index 1f9d103..0000000 --- a/subrepos/skewer/test-example/skewer.yaml +++ /dev/null @@ -1,103 +0,0 @@ -title: Skupper 
Hello World -subtitle: A minimal HTTP application deployed across Kubernetes clusters using Skupper -github_actions_url: https://github.com/skupperproject/skewer/actions/workflows/main.yaml -overview: | - This example is a very simple multi-service HTTP application that can - be deployed across multiple Kubernetes clusters using Skupper. - - It contains two services: - - * A backend service that exposes an `/api/hello` endpoint. It - returns greetings of the form `Hi, . I am - ()`. - - * A frontend service that sends greetings to the backend and - fetches new greetings in response. - - With Skupper, you can place the backend in one cluster and the - frontend in another and maintain connectivity between the two - services without exposing the backend to the public internet. - - -sites: - west: - kubeconfig: ~/.kube/config-west - namespace: west - east: - kubeconfig: ~/.kube/config-east - namespace: east -steps: - - standard: configure_separate_console_sessions - - standard: access_your_clusters - - standard: set_up_your_namespaces - - standard: install_skupper_in_your_namespaces - - standard: check_the_status_of_your_namespaces - - standard: link_your_namespaces - - title: Deploy the frontend and backend services - preamble: | - Use `kubectl create deployment` to deploy the frontend service - in `west` and the backend service in `east`. - commands: - west: - - run: kubectl create deployment frontend --image quay.io/skupper/hello-world-frontend - output: deployment.apps/frontend created - east: - - run: kubectl create deployment backend --image quay.io/skupper/hello-world-backend --replicas 3 - output: deployment.apps/backend created - - title: Expose the backend service - preamble: | - We now have two namespaces linked to form a Skupper network, but - no services are exposed on it. Skupper uses the `skupper - expose` command to select a service from one namespace for - exposure on all the linked namespaces. 
- - Use `skupper expose` to expose the backend service to the - frontend service. - commands: - east: - - await: deployment/backend - - run: skupper expose deployment/backend --port 8080 - output: deployment backend exposed as backend - - title: Expose the frontend service - preamble: | - We have established connectivity between the two namespaces and - made the backend in `east` available to the frontend in `west`. - Before we can test the application, we need external access to - the frontend. - - Use `kubectl expose` with `--type LoadBalancer` to open network - access to the frontend service. - commands: - west: - - await: deployment/frontend - - run: kubectl expose deployment/frontend --port 8080 --type LoadBalancer - output: service/frontend exposed - - standard: test_the_application - - standard: accessing_the_web_console - - standard: cleaning_up - commands: - west: - - run: skupper delete - - run: kubectl delete service/frontend - - run: kubectl delete deployment/frontend - east: - - run: skupper delete - - run: kubectl delete deployment/backend -summary: | - This example locates the frontend and backend services in different - namespaces, on different clusters. Ordinarily, this means that they - have no way to communicate unless they are exposed to the public - internet. - - Introducing Skupper into each namespace allows us to create a virtual - application network that can connect services in different clusters. - Any service exposed on the application network is represented as a - local service in all of the linked namespaces. - - The backend service is located in `east`, but the frontend service - in `west` can "see" it as if it were local. When the frontend - sends a request to the backend, Skupper forwards the request to the - namespace where the backend is running and routes the response back to - the frontend. 
- - diff --git a/subrepos/skewer/test-example/subrepos/skewer b/subrepos/skewer/test-example/subrepos/skewer deleted file mode 120000 index 6581736..0000000 --- a/subrepos/skewer/test-example/subrepos/skewer +++ /dev/null @@ -1 +0,0 @@ -../../ \ No newline at end of file