From ff623b460aed8989b1505a15ef473c45ea26cdab Mon Sep 17 00:00:00 2001 From: Martin Kravec Date: Wed, 11 Sep 2024 13:09:59 +0200 Subject: [PATCH] Helm chart testing Signed-off-by: Martin Kravec --- Makefile | 88 ++++++++----------- README.md | 4 +- .../default-kubewarden-controller-values.yaml | 9 -- .../default-kubewarden-defaults-values.yaml | 6 -- scripts/generate_resources_dir.sh | 16 +++- tests/common.bash | 2 +- tests/opentelemetry-tests.bats | 15 +++- tests/private-registry-tests.bats | 4 +- tests/upgrade.bats | 14 ++- 9 files changed, 77 insertions(+), 81 deletions(-) delete mode 100644 resources/default-kubewarden-controller-values.yaml delete mode 100644 resources/default-kubewarden-defaults-values.yaml diff --git a/Makefile b/Makefile index 4e855e6..d718aa2 100644 --- a/Makefile +++ b/Makefile @@ -3,60 +3,56 @@ mkfile_path := $(abspath $(lastword $(MAKEFILE_LIST))) mkfile_dir := $(dir $(mkfile_path)) TESTS_DIR ?= $(mkfile_dir)tests -# directory with all the "template" files used to generated the files used during -# the tests. +# directory with all the "template" files used to generate the files used during the tests ROOT_RESOURCES_DIR ?= $(mkfile_dir)resources -# timeout for the kubectl commands -TIMEOUT ?= 5m -# The KUBEWARDEN_CHARTS_LOCATION variable define where charts live. By default, the Helm -# chart repository is used. However, if you want to test a local Helm chart -# version, you can overwrite this variable with the parent directory of the chart. -# But the chart name must be equal of the names in the Helm chart repository. -KUBEWARDEN_CHARTS_LOCATION ?= kubewarden -CONTROLLER_CHART ?= $(KUBEWARDEN_CHARTS_LOCATION)/kubewarden-controller -NAMESPACE ?= kubewarden -K3D_VERSION ?= v5.7.3 -# helm repo name used to download the Helm charts. 
+ +# Kubewarden helm repository KUBEWARDEN_HELM_REPO_NAME ?= kubewarden -# URL where the Helm charts are stored -KUBEWARDEN_HELM_REPO_URL ?= https://charts.kubewarden.io - -KUBEWARDEN_CONTROLLER_CHART_VERSION ?= $(shell helm search repo $(KUBEWARDEN_HELM_REPO_NAME)/$(KUBEWARDEN_CONTROLLER_CHART_RELEASE) -o json --devel | jq -r ".[0].version") -KUBEWARDEN_CONTROLLER_CHART_OLD_VERSION ?= $(shell helm search repo $(KUBEWARDEN_HELM_REPO_NAME)/$(KUBEWARDEN_CONTROLLER_CHART_RELEASE) -o json --versions | jq -r ".[1].version") -KUBEWARDEN_CONTROLLER_CHART_RELEASE ?= kubewarden-controller -KUBEWARDEN_CRDS_CHART_VERSION ?= $(shell helm search repo $(KUBEWARDEN_HELM_REPO_NAME)/$(KUBEWARDEN_CRDS_CHART_RELEASE) -o json --devel | jq -r ".[0].version") -KUBEWARDEN_CRDS_CHART_OLD_VERSION ?= $(shell helm search repo $(KUBEWARDEN_HELM_REPO_NAME)/$(KUBEWARDEN_CRDS_CHART_RELEASE) -o json --versions | jq -r ".[1].version") -KUBEWARDEN_CRDS_CHART_RELEASE ?= kubewarden-crds -KUBEWARDEN_DEFAULTS_CHART_VERSION ?= $(shell helm search repo $(KUBEWARDEN_HELM_REPO_NAME)/$(KUBEWARDEN_DEFAULTS_CHART_RELEASE) -o json --devel | jq -r ".[0].version") -KUBEWARDEN_DEFAULTS_CHART_OLD_VERSION ?= $(shell helm search repo $(KUBEWARDEN_HELM_REPO_NAME)/$(KUBEWARDEN_DEFAULTS_CHART_RELEASE) -o json --versions | jq -r ".[1].version") -KUBEWARDEN_DEFAULTS_CHART_RELEASE ?= kubewarden-defaults -CERT_MANAGER_VERSION ?= v1.15.1 -# +# Override to use kubewarden charts from local directory +KUBEWARDEN_CHARTS_LOCATION ?= $(KUBEWARDEN_HELM_REPO_NAME) +NAMESPACE ?= kubewarden + +export CLUSTER_NAME ?= kubewarden-testing +CLUSTER_CONTEXT ?= k3d-$(CLUSTER_NAME) + +O := $(shell helm repo add $(KUBEWARDEN_HELM_REPO_NAME) https://charts.kubewarden.io --force-update) +O := $(shell helm repo update $(KUBEWARDEN_HELM_REPO_NAME)) + +# Parse current and previous helm versions for upgrade test: +# Current: last version from helm search kubewarden --devel +# Old: version that is older than the current version and also not an "-rc" 
+KW_VERSIONS := $(shell helm search repo --fail-on-no-result $(KUBEWARDEN_HELM_REPO_NAME)/ --versions --devel -o json | tr -d \' \ + | jq -ec 'unique_by(.name) as $$c | { current:($$c | map({(.name): .version}) | add), old:map(select(.app_version != $$c[0].app_version and (.app_version | contains("rc") | not) )) | unique_by(.name)| map({(.name): .version}) | add}') + +KUBEWARDEN_CONTROLLER_CHART_VERSION := $(shell echo '$(KW_VERSIONS)' | jq -er '.current["kubewarden/kubewarden-controller"]' || echo "*") +KUBEWARDEN_CRDS_CHART_VERSION := $(shell echo '$(KW_VERSIONS)' | jq -er '.current["kubewarden/kubewarden-crds"]' || echo "*") +KUBEWARDEN_DEFAULTS_CHART_VERSION := $(shell echo '$(KW_VERSIONS)' | jq -er '.current["kubewarden/kubewarden-defaults"]' || echo "*") + +KUBEWARDEN_CONTROLLER_CHART_OLD_VERSION := $(shell echo '$(KW_VERSIONS)' | jq -er '.old["kubewarden/kubewarden-controller"]' || echo "*") +KUBEWARDEN_CRDS_CHART_OLD_VERSION := $(shell echo '$(KW_VERSIONS)' | jq -er '.old["kubewarden/kubewarden-crds"]' || echo "*") +KUBEWARDEN_DEFAULTS_CHART_OLD_VERSION := $(shell echo '$(KW_VERSIONS)' | jq -er '.old["kubewarden/kubewarden-defaults"]' || echo "*") + # CRD version to be tested -CRD_VERSION ?= $(shell helm show values $(KUBEWARDEN_HELM_REPO_NAME)/$(KUBEWARDEN_DEFAULTS_CHART_RELEASE) --version $(KUBEWARDEN_DEFAULTS_CHART_VERSION) | yq -r ".crdVersion") +CRD_VERSION ?= $(shell helm show values $(KUBEWARDEN_CHARTS_LOCATION)/kubewarden-defaults --version '$(KUBEWARDEN_DEFAULTS_CHART_VERSION)' | yq ".crdVersion") CRD_VERSION_SUFFIX ?= $(shell echo $(CRD_VERSION) | cut -d'/' -f2) # directory with all the files used during the tests. 
This files are copied from # $(ROOT_RESOURCES_DIR) and changed to used the CRDs version defined in $(CRD_VERSION) RESOURCES_DIR ?= $(ROOT_RESOURCES_DIR)/resources_$(CRD_VERSION_SUFFIX) -export CLUSTER_NAME ?= kubewarden-testing -CLUSTER_CONTEXT ?= k3d-$(CLUSTER_NAME) # ================================================================================================== # Aliases kube = kubectl --context $(CLUSTER_CONTEXT) $(1) helm = helm --kube-context $(CLUSTER_CONTEXT) $(1) bats = RESOURCES_DIR=$(RESOURCES_DIR) \ - TIMEOUT=$(TIMEOUT) \ KUBEWARDEN_CRDS_CHART_OLD_VERSION=$(KUBEWARDEN_CRDS_CHART_OLD_VERSION) \ KUBEWARDEN_DEFAULTS_CHART_OLD_VERSION=$(KUBEWARDEN_DEFAULTS_CHART_OLD_VERSION) \ KUBEWARDEN_CONTROLLER_CHART_OLD_VERSION=$(KUBEWARDEN_CONTROLLER_CHART_OLD_VERSION) \ KUBEWARDEN_CRDS_CHART_VERSION=$(KUBEWARDEN_CRDS_CHART_VERSION) \ KUBEWARDEN_DEFAULTS_CHART_VERSION=$(KUBEWARDEN_DEFAULTS_CHART_VERSION) \ KUBEWARDEN_CONTROLLER_CHART_VERSION=$(KUBEWARDEN_CONTROLLER_CHART_VERSION) \ - KUBEWARDEN_CONTROLLER_CHART_RELEASE=$(KUBEWARDEN_CONTROLLER_CHART_RELEASE) \ KUBEWARDEN_CHARTS_LOCATION=$(KUBEWARDEN_CHARTS_LOCATION) \ - CONTROLLER_CHART=$(CONTROLLER_CHART) \ + KUBEWARDEN_HELM_REPO_NAME=$(KUBEWARDEN_HELM_REPO_NAME) \ CLUSTER_CONTEXT=$(CLUSTER_CONTEXT) \ NAMESPACE=$(NAMESPACE) \ bats -T --print-output-on-failure $(1) @@ -66,23 +62,12 @@ helm_in = $(helm) upgrade --install --wait --namespace $(NAMESPACE) --create-nam # ================================================================================================== # Macros define install-kubewarden = - helm repo add --force-update $(KUBEWARDEN_HELM_REPO_NAME) $(KUBEWARDEN_HELM_REPO_URL) - $(helm_in) --version $(KUBEWARDEN_CRDS_CHART_VERSION) \ - $(KUBEWARDEN_CRDS_CHART_RELEASE) $(KUBEWARDEN_CHARTS_LOCATION)/kubewarden-crds - $(helm_in) --version $(KUBEWARDEN_CONTROLLER_CHART_VERSION) \ - --values $(ROOT_RESOURCES_DIR)/default-kubewarden-controller-values.yaml \ - $(KUBEWARDEN_CONTROLLER_CHART_RELEASE) 
$(KUBEWARDEN_CHARTS_LOCATION)/kubewarden-controller - $(helm_in) --version $(KUBEWARDEN_DEFAULTS_CHART_VERSION) \ - --values $(ROOT_RESOURCES_DIR)/default-kubewarden-defaults-values.yaml \ - $(KUBEWARDEN_DEFAULTS_CHART_RELEASE) $(KUBEWARDEN_CHARTS_LOCATION)/kubewarden-defaults + $(helm_in) kubewarden-crds $(KUBEWARDEN_CHARTS_LOCATION)/kubewarden-crds --version "$(KUBEWARDEN_CRDS_CHART_VERSION)" + $(helm_in) kubewarden-controller $(KUBEWARDEN_CHARTS_LOCATION)/kubewarden-controller --version "$(KUBEWARDEN_CONTROLLER_CHART_VERSION)" + $(helm_in) kubewarden-defaults $(KUBEWARDEN_CHARTS_LOCATION)/kubewarden-defaults --version "$(KUBEWARDEN_DEFAULTS_CHART_VERSION)" $(kube) wait --for=condition=Ready --namespace $(NAMESPACE) pods --all endef -define install-cert-manager = - $(kube) apply -f https://github.com/cert-manager/cert-manager/releases/download/$(CERT_MANAGER_VERSION)/cert-manager.yaml - $(kube) wait --for=condition=Available deployment --timeout=2m -n cert-manager --all -endef - define generate-versioned-resources-dir = ./scripts/generate_resources_dir.sh $(ROOT_RESOURCES_DIR) $(CRD_VERSION) endef @@ -92,9 +77,9 @@ endef # Destructive tests that reinstall kubewarden # Test is responsible for used kubewarden version -upgrade.bats:: - $(MAKE) clean cluster - $(install-cert-manager) +upgrade:: + $(generate-versioned-resources-dir) + $(call bats, $(TESTS_DIR)/upgrade.bats) # Generate target for every test file TESTS := $(notdir $(wildcard tests/*.bats)) @@ -109,11 +94,10 @@ tests: $(filter-out upgrade.bats audit-scanner-installation.bats, $(TESTS)) .PHONY: cluster install reinstall clean cluster: - k3d cluster create $(CLUSTER_NAME) -s 1 -a 1 --wait --timeout $(TIMEOUT) -v /dev/mapper:/dev/mapper + k3d cluster create $(CLUSTER_NAME) -s 1 -a 1 --wait -v /dev/mapper:/dev/mapper $(kube) wait --for=condition=Ready nodes --all install: - $(install-cert-manager) $(install-kubewarden) clean: diff --git a/README.md b/README.md index 9b7bbcc..4f2e0bf 100644 --- a/README.md 
+++ b/README.md @@ -30,7 +30,7 @@ Other required dependencies: curl -s https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | bash # yq - python yq has different syntax then mikefarah binary -pip3 install yq +# Use the mikefarah yq binary; GitHub runners and zypper default to it # Also kubectl, helm, docker, ... ``` @@ -43,7 +43,7 @@ run the tasks: ```bash make clean # to remove previous k3d cluster make cluster # to create new k3d cluster -make install # to install kubewarden (and cert-manager) +make install # to install kubewarden # or you can group 3 steps above into make reinstall diff --git a/resources/default-kubewarden-controller-values.yaml b/resources/default-kubewarden-controller-values.yaml deleted file mode 100644 index 05f74c6..0000000 --- a/resources/default-kubewarden-controller-values.yaml +++ /dev/null @@ -1,9 +0,0 @@ -#image: -# repository: "ghcr.io/myuser/kubewarden-controller" -# tag: "latest" -telemetry: - enabled: false -policyServer: - env: - - name: KUBEWARDEN_LOG_LEVEL - value: debug diff --git a/resources/default-kubewarden-defaults-values.yaml b/resources/default-kubewarden-defaults-values.yaml deleted file mode 100644 index ef17ff2..0000000 --- a/resources/default-kubewarden-defaults-values.yaml +++ /dev/null @@ -1,6 +0,0 @@ -#policyServer: -# image: -# repository: "ghcr.io/myuser/policy-server" -# tag: "latest" -recommendedPolicies: - enabled: False diff --git a/scripts/generate_resources_dir.sh b/scripts/generate_resources_dir.sh index 53722c8..93f19fe 100755 --- a/scripts/generate_resources_dir.sh +++ b/scripts/generate_resources_dir.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash set -euo pipefail -CRD=$2 +export CRD=$2 SOURCEDIR="$1" TARGETDIR="$1/resources_${CRD#*/}" @@ -9,6 +9,14 @@ TARGETDIR="$1/resources_${CRD#*/}" mkdir -p $TARGETDIR cp -a $SOURCEDIR/*.yaml $TARGETDIR -# Replace policies apiVersion with $CRD -grep -rlZ "apiVersion: policies.kubewarden.io" $TARGETDIR | xargs -0 \ - yq -i -Y --arg c $CRD '.apiVersion = $c' +# There are 2 
implementations of yq, from mikefarah and kislyuk +# yq_kislyuk=(yq -i -Y '.apiVersion = env.CRD') +# yq_mikefarah=(yq eval -i '.apiVersion = env(CRD)') + +# Github runners default to mikefarah, let's support both for local runs +# yq --version | grep -q mikefarah && yqcmd=("${yq_mikefarah[@]}") || yqcmd=("${yq_kislyuk[@]}") + +# Replace apiVersion: policies.kubewarden.io/* -> policies.kubewarden.io/v1 +# grep -rlZ "apiVersion: policies.kubewarden.io" "$TARGETDIR" | xargs -0 -I {} "${yqcmd[@]}" {} + +grep -rlZ "apiVersion: policies.kubewarden.io" "$TARGETDIR" | xargs -0 -I {} yq -i '.apiVersion = env(CRD)' {} diff --git a/tests/common.bash b/tests/common.bash index 1cc5a45..1cfed72 100644 --- a/tests/common.bash +++ b/tests/common.bash @@ -35,7 +35,7 @@ function helm_up { def_version=$KUBEWARDEN_CRDS_CHART_VERSION;; esac - helm upgrade --version $def_version --wait \ + helm upgrade --version "$def_version" --wait \ --namespace $NAMESPACE --create-namespace \ "${@:2}" $1 $KUBEWARDEN_CHARTS_LOCATION/$1 diff --git a/tests/opentelemetry-tests.bats b/tests/opentelemetry-tests.bats index 8da047f..57426d0 100644 --- a/tests/opentelemetry-tests.bats +++ b/tests/opentelemetry-tests.bats @@ -10,6 +10,12 @@ setup() { } @test "[OpenTelemetry] Install OpenTelemetry, Prometheus, Jaeger" { + # Required by OpenTelemetry + helm repo add jetstack https://charts.jetstack.io --force-update + helm upgrade -i --wait cert-manager jetstack/cert-manager \ + -n cert-manager --create-namespace \ + --set crds.enabled=true + # OpemTelementry helm repo add --force-update open-telemetry https://open-telemetry.github.io/opentelemetry-helm-charts helm upgrade -i --wait my-opentelemetry-operator open-telemetry/opentelemetry-operator \ @@ -91,9 +97,16 @@ setup() { } teardown_file() { + load common.bash + # Remove installed apps + helm uninstall --wait -n jaeger jaeger-operator + helm uninstall --wait -n prometheus prometheus + helm uninstall --wait -n open-telemetry my-opentelemetry-operator + helm 
uninstall --wait -n cert-manager cert-manager + # Resources might be already deleted by helm update kubectl delete -f $RESOURCES_DIR/privileged-pod-policy.yaml --ignore-not-found - kubectl delete -f $RESOURCES_DIR/namespace-label-propagator-policy.yaml --ignore-not-found + kubectl delete -f $RESOURCES_DIR/namespace-label-propagator-policy.yaml --ignore-not-found kubectl delete pod nginx-privileged nginx-unprivileged --ignore-not-found kubectl delete jobs -n kubewarden testing --ignore-not-found } diff --git a/tests/private-registry-tests.bats b/tests/private-registry-tests.bats index 8d844fb..dc02fd6 100644 --- a/tests/private-registry-tests.bats +++ b/tests/private-registry-tests.bats @@ -95,8 +95,8 @@ teardown_file() { policy="$BATS_RUN_TMPDIR/private-policy.yaml" kwctl scaffold manifest --type=ClusterAdmissionPolicy $PUB_POLICY |\ - yq -y '.metadata.name = "private-pod-privileged"' |\ - yq -y --arg r $PRIV_POLICY '.spec.module = $r' > $policy + yq '.metadata.name = "private-pod-privileged"' |\ + PP=$PRIV_POLICY yq '.spec.module = strenv(PP)' > $policy # Make sure we use private registry grep -F "module: registry://$REGISTRY" $policy diff --git a/tests/upgrade.bats b/tests/upgrade.bats index 22ccf68..c90fa2c 100644 --- a/tests/upgrade.bats +++ b/tests/upgrade.bats @@ -27,12 +27,15 @@ function check_default_policies { run -1 kubectl run pod-privileged --image=registry.k8s.io/pause --privileged } - @test "[CRD upgrade] Install old Kubewarden" { + # Required by Kubewarden <= v1.16.0 + helm repo add jetstack https://charts.jetstack.io --force-update + helm upgrade -i --wait cert-manager jetstack/cert-manager -n cert-manager --create-namespace --set crds.enabled=true + # Install old kubewarden version - helm_in kubewarden-crds --version $KUBEWARDEN_CRDS_CHART_OLD_VERSION - helm_in kubewarden-controller --version $KUBEWARDEN_CONTROLLER_CHART_OLD_VERSION - helm_in kubewarden-defaults --version $KUBEWARDEN_DEFAULTS_CHART_OLD_VERSION \ + 
KUBEWARDEN_CHARTS_LOCATION="$KUBEWARDEN_HELM_REPO_NAME" helm_in kubewarden-crds --version $KUBEWARDEN_CRDS_CHART_OLD_VERSION + KUBEWARDEN_CHARTS_LOCATION="$KUBEWARDEN_HELM_REPO_NAME" helm_in kubewarden-controller --version $KUBEWARDEN_CONTROLLER_CHART_OLD_VERSION + KUBEWARDEN_CHARTS_LOCATION="$KUBEWARDEN_HELM_REPO_NAME" helm_in kubewarden-defaults --version $KUBEWARDEN_DEFAULTS_CHART_OLD_VERSION \ --set recommendedPolicies.enabled=True \ --set recommendedPolicies.defaultPolicyMode=protect check_default_policies @@ -43,6 +46,9 @@ function check_default_policies { helm_up kubewarden-controller --version $KUBEWARDEN_CONTROLLER_CHART_VERSION helm_up kubewarden-defaults --version $KUBEWARDEN_DEFAULTS_CHART_VERSION check_default_policies + # Not required by Kubewarden >= v1.17.0 + # Cert-manager versions prior to v1.15.0 do not keep the CustomResourceDefinition on uninstall + helm uninstall cert-manager -n cert-manager } @test "[CRD upgrade] Check old policy CRD version is translated to new" {