From 52631e21342b4c561bdaafe8e4c69825fe4e9333 Mon Sep 17 00:00:00 2001
From: Martin Kravec
Date: Thu, 26 Sep 2024 13:53:44 +0200
Subject: [PATCH] Introduce helmer.sh helm manager for tests

Implementation of https://github.com/kubewarden/kubewarden-end-to-end-tests/issues/116

- move k3d & helm out of the makefile
  A single test run used a github action, the makefile and scripts (all of them
  in sequence) to create the cluster & install kubewarden (mixed versions...).
- introduce helmer.sh to manage kubewarden during test runs

Helm management script:
- keeps the initial installation parameters so we can run follow-up tests with the same setup
- provides a consistent way to modify helm charts without upgrading the chart version
- introduces helpers to install the kubewarden stack based on the app version
- allows testing the latest images - they are not modified from test files
- used by makefile targets to install & upgrade kubewarden

```bash
VERSION=1.16.0 CONTROLLER_ARGS="--set image.tag=latest" ./helmer install
CHARTS_LOCATION=./charts DEFAULTS_ARGS="--set recommendedPolicies.enabled=true" ./helmer install
./helmer set controller --set telemetry.metrics.enabled=False
helm uninstall kubewarden-defaults -n kubewarden
./helmer reinstall defaults
./helmer reset controller
```

Cluster management moved from the makefile to the cluster_k3d.sh script. It requires one action parameter.
```bash
K3S=1.26 ./cluster_k3d.sh create
./cluster_k3d.sh delete
```

- common.bash (bats helpers) file split into:
  - kubelib.sh (kubernetes helpers)
  - helpers.sh (kubewarden helpers, requires bats)
- policy resource files moved to RESOURCES_DIR/policies
- cleaned up helper functions: wait_for_admission_policy, apply_admission_policy and
  apply_cluster_admission_policy are unified into apply_policy and delete_policy

Signed-off-by: Martin Kravec
---
 .gitignore | 1 +
 Makefile | 128 ++++-------
 helpers/helpers.sh | 91 ++++++++
 helpers/kubelib.sh | 64 ++++++
 ...ues.yaml => opentelemetry-prometheus.yaml} | 0
 ...lues.yaml => opentelemetry-telemetry.yaml} | 0
 .../{ => policies}/context-aware-policy.yaml | 0
 .../mutate-policy-with-flag-disabled.yaml | 2 +-
 .../mutate-policy-with-flag-enabled.yaml | 2 +-
 .../namespace-label-propagator-policy.yaml | 0
 .../namespace-psa-label-enforcer-policy.yaml | 0
 .../{ => policies}/policy-pod-privileged.yaml | 2 +-
 .../privileged-pod-policy-monitor.yaml | 2 +-
 .../{ => policies}/privileged-pod-policy.yaml | 2 +-
 .../{ => policies}/psp-user-group-policy.yaml | 0
 .../{ => policies}/safe-labels-namespace.yaml | 0
 .../safe-labels-pods-policy.yaml | 0
 scripts/cluster_k3d.sh | 40 ++++
 scripts/generate_resources_dir.sh | 22 --
 scripts/helmer.sh | 206 ++++++++++++++++++
 tests/audit-scanner-installation.bats | 25 ++-
 tests/audit-scanner.bats | 52 ++---
 tests/basic-end-to-end-tests.bats | 60 ++---
 tests/common.bash | 158 --------------
 tests/context-aware-requests-tests.bats | 27 +--
 tests/monitor-mode-tests.bats | 33 +--
 tests/mutating-requests-tests.bats | 35 +--
 tests/namespaced-admission-policy-tests.bats | 53 ++---
 tests/opentelemetry-tests.bats | 79 ++++---
 tests/private-registry-tests.bats | 148 ++++++-------
 tests/reconfiguration-tests.bats | 28 +--
 tests/secure-supply-chain-tests.bats | 71 +++---
 tests/upgrade.bats | 63 +++---
 33 files changed, 787 insertions(+), 607 deletions(-)
 create mode 100644 helpers/helpers.sh
 create mode 100644 helpers/kubelib.sh
 rename resources/{opentelemetry-prometheus-values.yaml => opentelemetry-prometheus.yaml} (100%)
 rename resources/{opentelemetry-kw-telemetry-values.yaml => opentelemetry-telemetry.yaml} (100%)
 rename resources/{ =>
policies}/context-aware-policy.yaml (100%) rename resources/{ => policies}/mutate-policy-with-flag-disabled.yaml (92%) rename resources/{ => policies}/mutate-policy-with-flag-enabled.yaml (92%) rename resources/{ => policies}/namespace-label-propagator-policy.yaml (100%) rename resources/{ => policies}/namespace-psa-label-enforcer-policy.yaml (100%) rename resources/{ => policies}/policy-pod-privileged.yaml (86%) rename resources/{ => policies}/privileged-pod-policy-monitor.yaml (87%) rename resources/{ => policies}/privileged-pod-policy.yaml (91%) rename resources/{ => policies}/psp-user-group-policy.yaml (100%) rename resources/{ => policies}/safe-labels-namespace.yaml (100%) rename resources/{ => policies}/safe-labels-pods-policy.yaml (100%) create mode 100755 scripts/cluster_k3d.sh delete mode 100755 scripts/generate_resources_dir.sh create mode 100755 scripts/helmer.sh delete mode 100644 tests/common.bash diff --git a/.gitignore b/.gitignore index 21cdc38..d3cf027 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,2 @@ resources/resources_*/ +charts diff --git a/Makefile b/Makefile index d718aa2..0eb6e82 100644 --- a/Makefile +++ b/Makefile @@ -1,106 +1,62 @@ -.DEFAULT_GOAL := basic-end-to-end-tests.bats - -mkfile_path := $(abspath $(lastword $(MAKEFILE_LIST))) -mkfile_dir := $(dir $(mkfile_path)) -TESTS_DIR ?= $(mkfile_dir)tests -# directory with all the "template" files used to generated the files used during the tests -ROOT_RESOURCES_DIR ?= $(mkfile_dir)resources - -# Kubewarden helm repository -KUBEWARDEN_HELM_REPO_NAME ?= kubewarden -# Override to use kubewarden charts from local directory -KUBEWARDEN_CHARTS_LOCATION ?= $(KUBEWARDEN_HELM_REPO_NAME) -NAMESPACE ?= kubewarden - -export CLUSTER_NAME ?= kubewarden-testing -CLUSTER_CONTEXT ?= k3d-$(CLUSTER_NAME) - -O := $(shell helm repo add $(KUBEWARDEN_HELM_REPO_NAME) https://charts.kubewarden.io --force-update) -O := $(shell helm repo update $(KUBEWARDEN_HELM_REPO_NAME)) - -# Parse current and previous helm versions for upgrade test: -# Current: last version from helm search kubewarden --devel -# Old: version that is older than the current version and also not an "-rc" -KW_VERSIONS := $(shell helm search repo --fail-on-no-result $(KUBEWARDEN_HELM_REPO_NAME)/ --versions --devel -o json | tr -d \' \ - | jq -ec 'unique_by(.name) as $$c | { current:($$c | map({(.name): .version}) | add), old:map(select(.app_version != $$c[0].app_version and (.app_version | contains("rc") | not) )) | unique_by(.name)| map({(.name): .version}) | add}') +.DEFAULT_GOAL := all -KUBEWARDEN_CONTROLLER_CHART_VERSION := $(shell echo '$(KW_VERSIONS)' | jq -er '.current["kubewarden/kubewarden-controller"]' || echo "*") -KUBEWARDEN_CRDS_CHART_VERSION := $(shell echo '$(KW_VERSIONS)' | jq -er '.current["kubewarden/kubewarden-crds"]' || echo "*") -KUBEWARDEN_DEFAULTS_CHART_VERSION := $(shell echo '$(KW_VERSIONS)' | jq -er '.current["kubewarden/kubewarden-defaults"]' || echo "*") - -KUBEWARDEN_CONTROLLER_CHART_OLD_VERSION := $(shell echo '$(KW_VERSIONS)' | jq -er '.old["kubewarden/kubewarden-controller"]' || echo "*") -KUBEWARDEN_CRDS_CHART_OLD_VERSION := $(shell echo '$(KW_VERSIONS)' | jq -er '.old["kubewarden/kubewarden-crds"]' || echo "*") -KUBEWARDEN_DEFAULTS_CHART_OLD_VERSION := $(shell echo '$(KW_VERSIONS)' | jq -er '.old["kubewarden/kubewarden-defaults"]' || echo "*") - -# CRD version to be tested -CRD_VERSION ?= $(shell helm show values $(KUBEWARDEN_CHARTS_LOCATION)/kubewarden-defaults --version '$(KUBEWARDEN_DEFAULTS_CHART_VERSION)' | yq ".crdVersion") 
-CRD_VERSION_SUFFIX ?= $(shell echo $(CRD_VERSION) | cut -d'/' -f2) -# directory with all the files used during the tests. This files are copied from -# $(ROOT_RESOURCES_DIR) and changed to used the CRDs version defined in $(CRD_VERSION) -RESOURCES_DIR ?= $(ROOT_RESOURCES_DIR)/resources_$(CRD_VERSION_SUFFIX) +MKFILE_DIR ?= $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) +TESTS_DIR ?= $(MKFILE_DIR)tests +RESOURCES_DIR ?= $(MKFILE_DIR)resources +NAMESPACE ?= kubewarden +CLUSTER_CONTEXT ?= $(shell kubectl config current-context) # ================================================================================================== -# Aliases -kube = kubectl --context $(CLUSTER_CONTEXT) $(1) -helm = helm --kube-context $(CLUSTER_CONTEXT) $(1) -bats = RESOURCES_DIR=$(RESOURCES_DIR) \ - KUBEWARDEN_CRDS_CHART_OLD_VERSION=$(KUBEWARDEN_CRDS_CHART_OLD_VERSION) \ - KUBEWARDEN_DEFAULTS_CHART_OLD_VERSION=$(KUBEWARDEN_DEFAULTS_CHART_OLD_VERSION) \ - KUBEWARDEN_CONTROLLER_CHART_OLD_VERSION=$(KUBEWARDEN_CONTROLLER_CHART_OLD_VERSION) \ - KUBEWARDEN_CRDS_CHART_VERSION=$(KUBEWARDEN_CRDS_CHART_VERSION) \ - KUBEWARDEN_DEFAULTS_CHART_VERSION=$(KUBEWARDEN_DEFAULTS_CHART_VERSION) \ - KUBEWARDEN_CONTROLLER_CHART_VERSION=$(KUBEWARDEN_CONTROLLER_CHART_VERSION) \ - KUBEWARDEN_CHARTS_LOCATION=$(KUBEWARDEN_CHARTS_LOCATION) \ - KUBEWARDEN_HELM_REPO_NAME=$(KUBEWARDEN_HELM_REPO_NAME) \ - CLUSTER_CONTEXT=$(CLUSTER_CONTEXT) \ - NAMESPACE=$(NAMESPACE) \ - bats -T --print-output-on-failure $(1) +# Optional arguments for scripts -helm_in = $(helm) upgrade --install --wait --namespace $(NAMESPACE) --create-namespace +# cluster_k3d.sh: +# K3S=[1.30] - short|long version +# CLUSTER_NAME=[k3d-default] -# ================================================================================================== -# Macros -define install-kubewarden = - $(helm_in) kubewarden-crds $(KUBEWARDEN_CHARTS_LOCATION)/kubewarden-crds --version "$(KUBEWARDEN_CRDS_CHART_VERSION)" - $(helm_in) kubewarden-controller $(KUBEWARDEN_CHARTS_LOCATION)/kubewarden-controller --version "$(KUBEWARDEN_CONTROLLER_CHART_VERSION)" - $(helm_in) kubewarden-defaults $(KUBEWARDEN_CHARTS_LOCATION)/kubewarden-defaults --version "$(KUBEWARDEN_DEFAULTS_CHART_VERSION)" - $(kube) wait --for=condition=Ready --namespace $(NAMESPACE) pods --all -endef - -define generate-versioned-resources-dir = - ./scripts/generate_resources_dir.sh $(ROOT_RESOURCES_DIR) $(CRD_VERSION) -endef +# helmer.sh: +# VERSION=[next|prev|v1.17.0-rc2|local] (app version) +# REPO_NAME=[kubewarden] +# CHARTS_LOCATION=[./dirname|reponame] +# LATEST=[1] +# CRDS_ARGS, DEFAULTS_ARGS, CONTROLLER_ARGS # ================================================================================================== # Targets -# Destructive tests that reinstall kubewarden -# Test is responsible for used kubewarden version -upgrade:: - $(generate-versioned-resources-dir) - $(call bats, $(TESTS_DIR)/upgrade.bats) +.PHONY: clean cluster install upgrade uninstall tests all # Generate target for every test file -TESTS := $(notdir $(wildcard tests/*.bats)) -$(TESTS):: - $(generate-versioned-resources-dir) - $(call bats, $(TESTS_DIR)/$@) +TESTFILES := $(notdir $(wildcard tests/*.bats)) +$(TESTFILES): + @RESOURCES_DIR=$(RESOURCES_DIR) \ + NAMESPACE=$(NAMESPACE) \ + CLUSTER_CONTEXT=$(CLUSTER_CONTEXT) \ + bats -T --print-output-on-failure $(TESTS_DIR)/$@ # Target all non-destructive tests -.PHONY: tests -tests: $(filter-out upgrade.bats audit-scanner-installation.bats, $(TESTS)) - -.PHONY: cluster install reinstall clean +tests: $(filter-out 
upgrade.bats audit-scanner-installation.bats, $(TESTFILES)) cluster: - k3d cluster create $(CLUSTER_NAME) -s 1 -a 1 --wait -v /dev/mapper:/dev/mapper - $(kube) wait --for=condition=Ready nodes --all + ./scripts/cluster_k3d.sh create + +install: check + ./scripts/helmer.sh install -install: - $(install-kubewarden) +upgrade: + ./scripts/helmer.sh upgrade + $(MAKE) upgrade.bats + +uninstall: + ./scripts/helmer.sh uninstall clean: - k3d cluster delete $(CLUSTER_NAME) + ./scripts/cluster_k3d.sh delete + +all: clean cluster install tests -reinstall: clean cluster install +check: + @yq --version | grep mikefarah > /dev/null || { echo "yq is not the correct, needs mikefarah/yq!"; exit 1; } + @jq --version > /dev/null || { echo "jq is not installed!"; exit 1; } + @k3d --version > /dev/null || { echo "k3d is not installed!"; exit 1; } + @bats --version > /dev/null || { echo "bats is not installed!"; exit 1; } diff --git a/helpers/helpers.sh b/helpers/helpers.sh new file mode 100644 index 0000000..05fd0a0 --- /dev/null +++ b/helpers/helpers.sh @@ -0,0 +1,91 @@ +#!/usr/bin/env bash + +bats_require_minimum_version 1.7.0 + +load "../helpers/bats-support/load.bash" +load "../helpers/bats-assert/load.bash" +load "../helpers/kubelib.sh" + +# ================================================================================================== +# Functions specific to kubewarden tests (require bats) + +kubectl() { command kubectl --context "$CLUSTER_CONTEXT" --warnings-as-errors "$@"; } +helm() { command helm --kube-context "$CLUSTER_CONTEXT" "$@"; } +helmer() { $BATS_TEST_DIRNAME/../scripts/helmer.sh "$@"; } + +# Export for retry function (subshell) +export -f kubectl helm + +# ================================================================================================== + +trigger_audit_scan() { + local jobname=${1:-auditjob} + kubectl create job --from=cronjob/audit-scanner $jobname --namespace $NAMESPACE | grep "$jobname created" + kubectl wait --timeout=3m --for=condition="Complete" job $jobname --namespace $NAMESPACE + kubectl delete job $jobname --namespace $NAMESPACE +} + +# Run & delete pod with optional parameters. Check exit code. +# kuberun [-0|-1|-N|!] "--privileged" +function kuberun { + local status=-0 + [[ $1 =~ ^([!]|-[0-9]+)$ ]] && status="$1" && shift + run "$status" kubectl run "pod-$(date +%s)" --image=busybox --restart=Never --rm -it --command "$@" -- true +} + +# Run kubectl action which should fail on pod privileged policy +function kubefail_privileged { + run kubectl "$@" + assert_failure 1 + assert_output --regexp '^Error.*: admission webhook.*denied the request.*container is not allowed$' +} + +# Prepend policies with RESOURCE dir if file doesn't contain '/' +policypath() { [[ "$1" == */* ]] && echo "$1" || echo "$RESOURCES_DIR/policies/$1"; } + +# Deploy from pipe or resources dir (if parameter doesn't contain '/') +# Detect policy kind and wait for it to be active and uniquely reachable +# Works only with default policy server +function apply_policy { + [ "${1:-}" = '--no-wait' ] && local nowait=true && shift + + # Handle policy yaml from pipe (-p /dev/stdin fails on github runner) + if [ $# -eq 0 ]; then + local tempfile=$(mktemp -p "$BATS_RUN_TMPDIR" policy-XXXXX.yaml) + cat > "$tempfile" + fi + + # Apply the policy and delete tempfile + local pfile=${tempfile:-$(policypath "$1")} + local kind=$(yq '.kind' "$pfile") + kubectl apply -f "$pfile" + + # Wait for the policy to be active and uniquely reachable + if [ ! 
-v nowait ]; then + wait_for --for=condition="PolicyActive" "$kind" --all -A + wait_policyserver default + wait_for --for=condition="PolicyUniquelyReachable" "$kind" --all -A + fi +} + +function delete_policy { + local pfile=$(policypath "$1") + kubectl delete --wait -f "$pfile" "${@:2}" +} + +# wait_policies [condition] - at least one policy must exist +function wait_policies { + for condition in ${1:-PolicyActive PolicyUniquelyReachable}; do + wait_for --for=condition="$condition" admissionpolicies,clusteradmissionpolicies,admissionpolicygroups,clusteradmissionpolicygroups --all -A + done +} + +# wait_policyserver [name] +function wait_policyserver { + local name="${1:-default}" + # Wait for specific revision to prevent changes during rollout + revision=$(kubectl -n $NAMESPACE get "deployment/policy-server-$name" -o json | jq -er '.metadata.annotations."deployment.kubernetes.io/revision"') + wait_rollout -n $NAMESPACE --revision $revision "deployment/policy-server-$name" + # Wait for final rollout? + wait_rollout -n $NAMESPACE "deployment/policy-server-$name" +} diff --git a/helpers/kubelib.sh b/helpers/kubelib.sh new file mode 100644 index 0000000..0257b78 --- /dev/null +++ b/helpers/kubelib.sh @@ -0,0 +1,64 @@ +#!/usr/bin/env bash +set -aeEuo pipefail + +# ================================================================================================== +# General helpers + +log () { printf -- "$(date +%R) \e[${1}m${*:2}\e[0m\n"; } +step () { log 32 "${*}" $(basename "${BASH_SOURCE[1]/${BASH_SOURCE}}" | sed 's/.\+/[&]/'); } # print test module +info () { log 0 " ${*}"; } +warn () { log 33 " ${*}"; } +error() { log 31 " ${*}"; } + +# ================================================================================================== +# Kubernetes helpers + +yq() { command yq -e "$@"; } +jq() { command jq -e "$@"; } + +# Export for retry function (subshell) +export -f yq jq + +function retry() { + local cmd=$1 + local tries=${2:-15} + local delay=${3:-20} + local i + + for ((i=1; i<=tries; i++)); do + timeout 25 bash -c "$cmd" && break || echo "RETRY #$i: $cmd" + [ $i -ne $tries ] && sleep $delay || { echo "Godot: $cmd"; false; } + done +} + +# Safe version of waiting for pods.
Looks in kube-system ns by default +# Handles kube-api disconnects during upgrade +function wait_pods() { + local i output + for i in {1..20}; do + output=$(kubectl get pods --no-headers -o wide ${@:--n kubewarden} | grep -vw Completed || echo 'Fail') + grep -vE '([0-9]+)/\1 +Running' <<< $output || break + [ $i -ne 20 ] && sleep 30 || { echo "Godot: pods not running"; false; } + done +} + +# Safe version of waiting for nodes +# Handles kube-api disconnects during upgrade +function wait_nodes() { + local i output + for i in {1..20}; do + output=$(kubectl get nodes --no-headers ${@:-} || echo 'Fail') + grep -vE '\bReady\b' <<< $output || break + [ $i -ne 20 ] && sleep 30 || { echo "Godot: nodes not running"; false; } + done +} + +function wait_for () { kubectl wait --timeout=5m "$@"; } +function wait_rollout() { kubectl rollout status --timeout=5m "$@"; } + +# Wait for cluster to come up after reboot +function wait_cluster() { + retry "kubectl cluster-info" 20 30 + wait_nodes + wait_pods +} diff --git a/resources/opentelemetry-prometheus-values.yaml b/resources/opentelemetry-prometheus.yaml similarity index 100% rename from resources/opentelemetry-prometheus-values.yaml rename to resources/opentelemetry-prometheus.yaml diff --git a/resources/opentelemetry-kw-telemetry-values.yaml b/resources/opentelemetry-telemetry.yaml similarity index 100% rename from resources/opentelemetry-kw-telemetry-values.yaml rename to resources/opentelemetry-telemetry.yaml diff --git a/resources/context-aware-policy.yaml b/resources/policies/context-aware-policy.yaml similarity index 100% rename from resources/context-aware-policy.yaml rename to resources/policies/context-aware-policy.yaml diff --git a/resources/mutate-policy-with-flag-disabled.yaml b/resources/policies/mutate-policy-with-flag-disabled.yaml similarity index 92% rename from resources/mutate-policy-with-flag-disabled.yaml rename to resources/policies/mutate-policy-with-flag-disabled.yaml index cdb54e2..44b2385 100644 --- a/resources/mutate-policy-with-flag-disabled.yaml +++ b/resources/policies/mutate-policy-with-flag-disabled.yaml @@ -1,5 +1,5 @@ --- -apiVersion: policies.kubewarden.io/v1alpha2 +apiVersion: policies.kubewarden.io/v1 kind: ClusterAdmissionPolicy metadata: name: psp-user-group-disabled diff --git a/resources/mutate-policy-with-flag-enabled.yaml b/resources/policies/mutate-policy-with-flag-enabled.yaml similarity index 92% rename from resources/mutate-policy-with-flag-enabled.yaml rename to resources/policies/mutate-policy-with-flag-enabled.yaml index ebde1c6..fca11dd 100644 --- a/resources/mutate-policy-with-flag-enabled.yaml +++ b/resources/policies/mutate-policy-with-flag-enabled.yaml @@ -1,5 +1,5 @@ --- -apiVersion: policies.kubewarden.io/v1alpha2 +apiVersion: policies.kubewarden.io/v1 kind: ClusterAdmissionPolicy metadata: name: psp-user-group-enabled diff --git a/resources/namespace-label-propagator-policy.yaml b/resources/policies/namespace-label-propagator-policy.yaml similarity index 100% rename from resources/namespace-label-propagator-policy.yaml rename to resources/policies/namespace-label-propagator-policy.yaml diff --git a/resources/namespace-psa-label-enforcer-policy.yaml b/resources/policies/namespace-psa-label-enforcer-policy.yaml similarity index 100% rename from resources/namespace-psa-label-enforcer-policy.yaml rename to resources/policies/namespace-psa-label-enforcer-policy.yaml diff --git a/resources/policy-pod-privileged.yaml b/resources/policies/policy-pod-privileged.yaml similarity index 86% rename from 
resources/policy-pod-privileged.yaml rename to resources/policies/policy-pod-privileged.yaml index 11e3ef8..71c49a9 100644 --- a/resources/policy-pod-privileged.yaml +++ b/resources/policies/policy-pod-privileged.yaml @@ -1,4 +1,4 @@ -apiVersion: policies.kubewarden.io/v1alpha2 +apiVersion: policies.kubewarden.io/v1 kind: AdmissionPolicy metadata: name: pod-privileged diff --git a/resources/privileged-pod-policy-monitor.yaml b/resources/policies/privileged-pod-policy-monitor.yaml similarity index 87% rename from resources/privileged-pod-policy-monitor.yaml rename to resources/policies/privileged-pod-policy-monitor.yaml index 3e01196..271a9a6 100644 --- a/resources/privileged-pod-policy-monitor.yaml +++ b/resources/policies/privileged-pod-policy-monitor.yaml @@ -1,4 +1,4 @@ -apiVersion: policies.kubewarden.io/v1alpha2 +apiVersion: policies.kubewarden.io/v1 kind: ClusterAdmissionPolicy metadata: name: privileged-pods diff --git a/resources/privileged-pod-policy.yaml b/resources/policies/privileged-pod-policy.yaml similarity index 91% rename from resources/privileged-pod-policy.yaml rename to resources/policies/privileged-pod-policy.yaml index 73de21d..5184758 100644 --- a/resources/privileged-pod-policy.yaml +++ b/resources/policies/privileged-pod-policy.yaml @@ -1,4 +1,4 @@ -apiVersion: policies.kubewarden.io/v1alpha2 +apiVersion: policies.kubewarden.io/v1 kind: ClusterAdmissionPolicy metadata: name: privileged-pods diff --git a/resources/psp-user-group-policy.yaml b/resources/policies/psp-user-group-policy.yaml similarity index 100% rename from resources/psp-user-group-policy.yaml rename to resources/policies/psp-user-group-policy.yaml diff --git a/resources/safe-labels-namespace.yaml b/resources/policies/safe-labels-namespace.yaml similarity index 100% rename from resources/safe-labels-namespace.yaml rename to resources/policies/safe-labels-namespace.yaml diff --git a/resources/safe-labels-pods-policy.yaml b/resources/policies/safe-labels-pods-policy.yaml similarity index 100% rename from resources/safe-labels-pods-policy.yaml rename to resources/policies/safe-labels-pods-policy.yaml diff --git a/scripts/cluster_k3d.sh b/scripts/cluster_k3d.sh new file mode 100755 index 0000000..80310e2 --- /dev/null +++ b/scripts/cluster_k3d.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash +set -aeEuo pipefail +# trap 'echo "Error on ${BASH_SOURCE/$PWD/.}:${LINENO} $(sed -n "${LINENO} s/^\s*//p" $PWD/${BASH_SOURCE/$PWD})"' ERR + +. "$(dirname "$0")/../helpers/kubelib.sh" + +# Optional variables +K3S=${K3S:-$(k3d version -o json | jq -r '.k3s')} +CLUSTER_NAME=${CLUSTER_NAME:-k3s-default} +MASTER_COUNT=${MASTER_COUNT:-1} +WORKER_COUNT=${WORKER_COUNT:-1} + +# Complete partial K3S version from dockerhub +if [[ ! 
$K3S =~ ^v[0-9.]+-k3s[0-9]$ ]]; then + K3S=$(curl -L -s "https://registry.hub.docker.com/v2/repositories/rancher/k3s/tags?page_size=20&name=$K3S" | jq -re 'first(.results[].name | select(test("^v[0-9.]+-k3s[0-9]$")))') + echo "K3S version: $K3S" +fi + +# Create new cluster +if [ "${1:-}" == 'create' ]; then + # /dev/mapper: https://k3d.io/v5.7.4/faq/faq/#issues-with-btrfs + k3d cluster create $CLUSTER_NAME --wait \ + --image rancher/k3s:$K3S \ + -s $MASTER_COUNT -a $WORKER_COUNT \ + --registry-create k3d-$CLUSTER_NAME-registry \ + -v /dev/mapper:/dev/mapper + wait_pods -n kube-system +fi + +# Delete existing cluster +if [ "${1:-}" == 'delete' ]; then + k3d cluster delete $CLUSTER_NAME +fi + +# Return 0 if cluster exists otherwise non 0 +if [ "${1:-}" == 'status' ]; then + k3d cluster list $CLUSTER_NAME &>/dev/null +fi + +: diff --git a/scripts/generate_resources_dir.sh b/scripts/generate_resources_dir.sh deleted file mode 100755 index 93f19fe..0000000 --- a/scripts/generate_resources_dir.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -export CRD=$2 -SOURCEDIR="$1" -TARGETDIR="$1/resources_${CRD#*/}" - -# Copy files to versioned directory -mkdir -p $TARGETDIR -cp -a $SOURCEDIR/*.yaml $TARGETDIR - -# There are 2 implementations of yq, from mikefarah and kislyuk -# yq_kislyuk=(yq -i -Y '.apiVersion = env.CRD') -# yq_mikefarah=(yq eval -i '.apiVersion = env(CRD)') - -# Github runners default to mikefarah, let's support both for local runs -# yq --version | grep -q mikefarah && yqcmd=("${yq_mikefarah[@]}") || yqcmd=("${yq_kislyuk[@]}") - -# Replace apiVersion: policies.kubewarden.io/* -> policies.kubewarden.io/v1 -# grep -rlZ "apiVersion: policies.kubewarden.io" "$TARGETDIR" | xargs -0 -I {} "${yqcmd[@]}" {} - -grep -rlZ "apiVersion: policies.kubewarden.io" "$TARGETDIR" | xargs -0 -I {} yq -i '.apiVersion = env(CRD)' {} diff --git a/scripts/helmer.sh b/scripts/helmer.sh new file mode 100755 index 0000000..d985fdd --- /dev/null +++ b/scripts/helmer.sh @@ -0,0 +1,206 @@ +#!/usr/bin/env bash +set -aeEuo pipefail +# trap 'echo "Error on ${BASH_SOURCE/$PWD/.}:${LINENO} $(sed -n "${LINENO} s/^\s*//p" $PWD/${BASH_SOURCE/$PWD})"' ERR + +. 
"$(dirname "$0")/../helpers/kubelib.sh" + +# ================================================================================================== +# Global variables & Checks - Used only in install & upgrade actions + +NAMESPACE=${NAMESPACE:-kubewarden} +# Kubewarden helm repository +REPO_NAME=${REPO_NAME:-kubewarden} +# Use charts from [./dirname|reponame] +CHARTS_LOCATION=${CHARTS_LOCATION:-$REPO_NAME} +# [next|prev|v1.17.0-rc2|local] +VERSION=${VERSION:-$( [[ "$CHARTS_LOCATION" == */* ]] && echo "local" || echo "next" )} + +# Extra parameters for helm install +CRDS_ARGS="${CRDS_ARGS:-}" +CONTROLLER_ARGS="${CONTROLLER_ARGS:-}" +DEFAULTS_ARGS="${DEFAULTS_ARGS:-}" +# Use latest tag for main images +if [ -n "${LATEST:-}" ]; then + DEFAULTS_ARGS="--set policyServer.image.tag=latest $DEFAULTS_ARGS" + CONTROLLER_ARGS="--set image.tag=latest --set auditScanner.image.tag=latest $CONTROLLER_ARGS" +fi + +# Add missing "v" prefix +[[ $VERSION =~ ^[1-9] ]] && VERSION="v$VERSION" + +# Check if local charts are available +[ "$VERSION" = local ] && test -d "$CHARTS_LOCATION/kubewarden-crds" + +# Remove kubewarden- prefix from chart name +[ $# -gt 1 ] && set -- "$1" "${2/#kubewarden-}" "${@:3}" + +# Second parameter must be short chart name or empty +[[ ${2:-} =~ ^(crds|controller|defaults)?$ ]] || { echo "Bad chart: $2"; exit 1; } + +# ================================================================================================== +# Configuration of helm versions & values + +print_env() { + # Main parameters + echo NAMESPACE=\"$NAMESPACE\" + echo REPO_NAME=\"$REPO_NAME\" + echo CHARTS_LOCATION=\"$CHARTS_LOCATION\" + echo VERSION=\"$VERSION\" + # Extra parameters + echo CRDS_ARGS=\"$CRDS_ARGS\" + echo CONTROLLER_ARGS=\"$CONTROLLER_ARGS\" + echo DEFAULTS_ARGS=\"$DEFAULTS_ARGS\" + # Version map + for key in "${!vMap[@]}"; do + echo vMap[$key]=\"${vMap[$key]}\" + done +} + +load_env() { source /tmp/helmer.env; } + +# Parse app $VERSION into chart versions (vMap) +declare -A vMap +make_version_map() { + # Use tempfile to avoid handling quotes in json > bash + local tempfile=$(mktemp -p "${BATS_RUN_TMPDIR:-/tmp}" vermap-XXXXX.json) + + if [ "$VERSION" != local ]; then + # Do single helm search query to speed up the process + helm repo update $REPO_NAME --fail-on-repo-update-fail >/dev/null + helm search repo --fail-on-no-result $REPO_NAME/ --versions --devel -o json \ + | jq -ec --arg appv "$VERSION" ' + def remap(items): items | unique_by(.name) | map({(.name): .version}) | add; + unique_by(.name) as $latest | + { + appv: remap(map(select(.app_version == $appv))), + next: remap($latest), + prev: remap(map(select(.app_version != $latest[0].app_version and (.app_version | contains("-rc") | not)))), + }' > "$tempfile" + fi + + # Load $VERSION from json into vMap array + for chart in crds controller defaults; do + case $VERSION in + # Next: last version from helm search kubewarden --devel + next) vMap["$chart"]=$(jq -er --arg c $chart '.next["kubewarden/kubewarden-" + $c]' "$tempfile") ;; + # Prev: previous stable version (older than next and not -rc) + prev) vMap["$chart"]=$(jq -er --arg c $chart '.prev["kubewarden/kubewarden-" + $c]' "$tempfile") ;; + # App: Exact helm app version: v1.17.0-rc2 + v[1-9]*) vMap["$chart"]=$(jq -er --arg c $chart '.appv["kubewarden/kubewarden-" + $c]' "$tempfile") ;; + # Local: directory with kw charts (kubewarden-crds, kubewarden-controller, kubewarden-defaults) + local) vMap["$chart"]=$(helm show chart $CHARTS_LOCATION/kubewarden-$chart | yq '.version') ;; + *) echo "Bad 
VERSION: ${VERSION:-}"; exit 1;; + esac + done + rm "$tempfile" + + # Save initial settings + print_env > /tmp/helmer.env +} + +# ================================================================================================== +# Install & Upgrade kubewarden (change chart version) + +# Usage: helm_in kubewarden-crds [--params ..] +helm_in() { helm install --wait --wait-for-jobs --namespace $NAMESPACE "${@:2}" "$1" "$CHARTS_LOCATION/$1"; } + +# Install selected $VERSION +do_install() { + echo "Install $VERSION: ${vMap[*]}" + # Cert-manager is required by Kubewarden <= v1.16.0 (crds < 1.9.0) + if printf '%s\n' "${vMap[crds]}" "1.8.9" | sort -V -C; then + helm repo add jetstack https://charts.jetstack.io --force-update + helm upgrade -i --wait cert-manager jetstack/cert-manager -n cert-manager --create-namespace --set crds.enabled=true + fi + + local argsvar + for chart in ${1:-crds controller defaults}; do + argsvar=${chart^^}_ARGS + helm_in kubewarden-$chart --create-namespace --version "${vMap[$chart]}" ${!argsvar} "${@:2}" + done + + if [ "${1:-defaults}" = 'defaults' ]; then + # Initial deployment of policy-server-default is created by controller with delay + retry "kubectl get -n $NAMESPACE deployment/policy-server-default" 2 + wait_rollout -n $NAMESPACE deployment/policy-server-default + fi + return 0 +} + +# Upgrade version and restore current values (helm recommends) +do_upgrade() { + echo "Upgrade to $VERSION: ${vMap[*]}" + + local argsvar + for chart in ${1:-crds controller defaults}; do + argsvar=${chart^^}_ARGS + # Look into --reset-then-reuse-values helm flag as replacement + helm get values kubewarden-$chart -n $NAMESPACE -o yaml > /tmp/chart-values.yaml + helm upgrade kubewarden-$chart -n $NAMESPACE $CHARTS_LOCATION/kubewarden-$chart --wait \ + --version "${vMap[$chart]}" --values /tmp/chart-values.yaml ${!argsvar} "${@:2}" + done + [ "${1:-defaults}" == 'defaults' ] && wait_rollout -n $NAMESPACE deployment/policy-server-default + + # Cert-manager is not required by Kubewarden >= v1.17.0 (crds >= 1.9.0) + if printf '%s\n' "1.9.0" "${vMap[crds]}" | sort -V -C; then + helm uninstall --wait cert-manager -n cert-manager --ignore-not-found + fi +} + +do_uninstall() { + echo "Uninstall kubewarden: ${1:-charts}" + for chart in ${1:-defaults controller crds}; do + helm uninstall --wait --namespace $NAMESPACE kubewarden-$chart "${@:2}" + done +} + +# Modify installed chart values & keep version +do_set() { + local chart="$1" + + local ver=$(helm get metadata kubewarden-$chart -n $NAMESPACE -o json | jq -er '.version') + helm upgrade kubewarden-$chart $CHARTS_LOCATION/kubewarden-$chart -n $NAMESPACE --wait --wait-for-jobs \ + --version "$ver" --reuse-values "${@:2}" + + [ "$1" = 'defaults' ] && wait_rollout -n $NAMESPACE deployment/policy-server-default + return 0 +} + +do_reset() { + local chart="$1" + local argsvar=${chart^^}_ARGS + + local ver=$(helm get metadata kubewarden-$chart -n $NAMESPACE -o json | jq -er '.version') + helm upgrade kubewarden-$chart $CHARTS_LOCATION/kubewarden-$chart -n $NAMESPACE --wait --wait-for-jobs \ + --version "$ver" --reset-values ${!argsvar} "${@:2}" + + # Wait for pods to be ready + [ "$1" = 'defaults' ] && wait_rollout -n $NAMESPACE deployment/policy-server-default + return 0 +} + +case $1 in + in|install|up|upgrade) + make_version_map;;& + reinstall|uninstall|set|reset) + load_env;;& + + in|install) do_install "${@:2}";; + up|upgrade) do_upgrade "${@:2}";; + reinstall) do_install "${@:2}";; + uninstall) do_uninstall "${@:2}";; + set) do_set $2 
"${@:3}";; + reset) do_reset $2 "${@:3}";; + debug) + echo "### Helmer env:" + cat /tmp/helmer.env + echo "### Helm ls:" + helm ls -n $NAMESPACE -o json | jq ".[].chart" + echo "### Current charts values:" + for chart in crds controller defaults; do + helm get values kubewarden-$chart -n $NAMESPACE + done + ;; + *) + echo "Bad command: $1"; exit 1;; +esac diff --git a/tests/audit-scanner-installation.bats b/tests/audit-scanner-installation.bats index 7ce2d94..d02dfe3 100644 --- a/tests/audit-scanner-installation.bats +++ b/tests/audit-scanner-installation.bats @@ -1,15 +1,21 @@ #!/usr/bin/env bats setup() { - load common.bash - wait_pods -n kube-system + load ../helpers/helpers.sh + wait_pods -n kube-system } CRD_BASE=https://github.com/kubernetes-sigs/wg-policy-prototypes/raw/master/policy-report/crd/v1alpha2/ +function kubewarden_uninstall { + helmer uninstall defaults --ignore-not-found + helmer uninstall controller + helmer uninstall crds +} + # assert_crds true|false function assert_crds { - run kubectl api-resources + run kubectl api-resources --no-headers if $1; then assert_output -p 'ClusterPolicyReport' assert_output -p 'PolicyReport' @@ -30,14 +36,14 @@ function assert_cronjob { } @test "[Audit Scanner] Reconfigure audit scanner" { - helm_up kubewarden-controller --reuse-values --set auditScanner.cronJob.schedule="*/30 * * * *" + helmer set kubewarden-controller --set auditScanner.cronJob.schedule="*/30 * * * *" run kubectl get cronjob -n $NAMESPACE assert_output -p audit-scanner assert_output -p "*/30 * * * *" } @test "[Audit Scanner] Audit scanner resources are cleaned with kubewarden" { - kubewarden_remove + kubewarden_uninstall assert_crds false assert_cronjob false } @@ -49,8 +55,8 @@ function assert_cronjob { assert_crds true # Install kubewarden with existing policyreport crds - helm_in kubewarden-crds --set installPolicyReportCRDs=False - helm_in kubewarden-controller + helmer reinstall crds --set installPolicyReportCRDs=False + helmer reinstall controller assert_cronjob true # Check policy reports did not come from helm (have no labels) @@ -58,7 +64,7 @@ function assert_cronjob { kubectl get crds clusterpolicyreports.wgpolicyk8s.io -o json | jq -e '.metadata.labels == null' # Kubewarden should not remove custom crds - kubewarden_remove + kubewarden_uninstall assert_crds true assert_cronjob false @@ -68,8 +74,7 @@ function assert_cronjob { } @test "[Audit Scanner] Install with CRDs from Kubewarden Helm charts" { - helm_in kubewarden-crds - helm_in kubewarden-controller + helmer reinstall assert_crds true assert_cronjob true diff --git a/tests/audit-scanner.bats b/tests/audit-scanner.bats index 61825d8..cf5fd29 100644 --- a/tests/audit-scanner.bats +++ b/tests/audit-scanner.bats @@ -1,15 +1,15 @@ #!/usr/bin/env bats setup() { - load common.bash + load ../helpers/helpers.sh wait_pods -n kube-system } -trigger_audit_scan() { - local jobname=${1:-testing} - kubectl delete --ignore-not-found job $jobname --namespace $NAMESPACE - kubectl create job --from=cronjob/audit-scanner $jobname --namespace $NAMESPACE | grep "$jobname created" - kubectl wait --for=condition="Complete" job $jobname --namespace $NAMESPACE +teardown_file() { + load ../helpers/helpers.sh + kubectl delete pods --all + kubectl delete admissionpolicies,clusteradmissionpolicies --all -A + kubectl delete ns testing-audit-scanner --ignore-not-found } # get_report "pod/podname" @@ -18,9 +18,9 @@ get_report() { # Find resource UID local ruid=$(kubectl get $resource -o jsonpath='{.metadata.uid}') # Figure out if 
resource report is namespaced or not - kubectl api-resources --namespaced=false | grep -qw ${resource%/*} && rtype=cpolr || rtype=polr + kubectl api-resources --no-headers --namespaced=false | grep -w ${resource%/*} >/dev/null && rtype=cpolr || rtype=polr # Print resource report - kubectl get $rtype $ruid -o json + kubectl get $rtype $ruid -o json | jq -c '.' } # check_report_summary "$report" 2 0 @@ -37,7 +37,7 @@ check_report_summary() { check_report_result() { local report="$1" local result="$2" - local policy="$3" + local policy="${3:-}" [[ $result =~ ^(pass|fail|null)$ ]] @@ -56,7 +56,7 @@ check_report_result() { @test "[Audit Scanner] Install testing policies and resources" { # Make sure cronjob was created - kubectl get cronjob -n kubewarden audit-scanner + kubectl get cronjob -n $NAMESPACE audit-scanner # Launch unprivileged pod kubectl run nginx-unprivileged --image=nginx:alpine @@ -70,11 +70,11 @@ check_report_result() { kubectl create ns testing-audit-scanner kubectl label ns testing-audit-scanner cost-center=123 - # Deploy some policy - kubectl apply -f $RESOURCES_DIR/privileged-pod-policy.yaml - kubectl apply -f $RESOURCES_DIR/namespace-psa-label-enforcer-policy.yaml - kubectl apply -f $RESOURCES_DIR/safe-labels-namespace.yaml - apply_cluster_admission_policy $RESOURCES_DIR/safe-labels-pods-policy.yaml + # Deploy some policies + apply_policy --no-wait privileged-pod-policy.yaml + apply_policy --no-wait namespace-psa-label-enforcer-policy.yaml + apply_policy --no-wait safe-labels-namespace.yaml + apply_policy safe-labels-pods-policy.yaml trigger_audit_scan } @@ -107,9 +107,9 @@ check_report_result() { } @test "[Audit Scanner] Delete some policies and retrigger audit scan" { - kubectl delete -f $RESOURCES_DIR/safe-labels-pods-policy.yaml - kubectl delete -f $RESOURCES_DIR/namespace-psa-label-enforcer-policy.yaml - wait_for_default_policy_server_rollout + delete_policy safe-labels-pods-policy.yaml + delete_policy namespace-psa-label-enforcer-policy.yaml + wait_policyserver trigger_audit_scan } @@ -142,21 +142,11 @@ check_report_result() { } @test "[Audit Scanner] Delete all policy reports after all relevant policies" { - kubectl delete -f $RESOURCES_DIR/privileged-pod-policy.yaml - kubectl delete -f $RESOURCES_DIR/safe-labels-namespace.yaml - wait_for_default_policy_server_rollout + delete_policy privileged-pod-policy.yaml + delete_policy safe-labels-namespace.yaml + wait_policyserver trigger_audit_scan kubectl get policyreport -A 2>&1 | grep 'No resources found' kubectl get clusterpolicyreport 2>&1 | grep 'No resources found' } - -teardown_file() { - kubectl delete --ignore-not-found -f $RESOURCES_DIR/privileged-pod-policy.yaml - kubectl delete --ignore-not-found -f $RESOURCES_DIR/namespace-label-propagator-policy.yaml - kubectl delete --ignore-not-found -f $RESOURCES_DIR/safe-labels-namespace.yaml - kubectl delete --ignore-not-found -f $RESOURCES_DIR/safe-labels-pods-policy.yaml - kubectl delete --ignore-not-found ns testing-audit-scanner - kubectl delete --ignore-not-found pod nginx-privileged nginx-unprivileged - kubectl delete --ignore-not-found jobs -n kubewarden testing -} diff --git a/tests/basic-end-to-end-tests.bats b/tests/basic-end-to-end-tests.bats index 8646cd1..ad3c211 100644 --- a/tests/basic-end-to-end-tests.bats +++ b/tests/basic-end-to-end-tests.bats @@ -1,63 +1,63 @@ #!/usr/bin/env bats setup() { - load common.bash - wait_pods + load ../helpers/helpers.sh + wait_pods } teardown_file() { - kubectl delete pods --all - kubectl delete clusteradmissionpolicies 
--all - kubectl delete -f $RESOURCES_DIR/policy-server.yaml --ignore-not-found + load ../helpers/helpers.sh + kubectl delete pods --all + kubectl delete admissionpolicies,clusteradmissionpolicies --all -A } # Create pod-privileged policy to block CREATE & UPDATE of privileged pods @test "[Basic end-to-end tests] Apply pod-privileged policy that blocks CREATE & UPDATE" { - apply_cluster_admission_policy $RESOURCES_DIR/privileged-pod-policy.yaml + apply_policy privileged-pod-policy.yaml - # Launch unprivileged pod - kubectl run nginx-unprivileged --image=nginx:alpine - kubectl wait --for=condition=Ready pod nginx-unprivileged + # Launch unprivileged pod + kubectl run nginx-unprivileged --image=nginx:alpine + kubectl wait --for=condition=Ready pod nginx-unprivileged - # Launch privileged pod (should fail) - kubefail_privileged run pod-privileged --image=registry.k8s.io/pause --privileged + # Launch privileged pod (should fail) + kubefail_privileged run pod-privileged --image=registry.k8s.io/pause --privileged } # Update pod-privileged policy to block only UPDATE of privileged pods @test "[Basic end-to-end tests] Patch policy to block only UPDATE operation" { - yq '.spec.rules[0].operations = ["UPDATE"]' resources/privileged-pod-policy.yaml | kubectl apply -f - + yq '.spec.rules[0].operations = ["UPDATE"]' $RESOURCES_DIR/policies/privileged-pod-policy.yaml | kubectl apply -f - - # I can create privileged pods now - kubectl run nginx-privileged --image=nginx:alpine --privileged + # I can create privileged pods now + kubectl run nginx-privileged --image=nginx:alpine --privileged - # I can not update privileged pods - kubefail_privileged label pod nginx-privileged x=y + # I can not update privileged pods + kubefail_privileged label pod nginx-privileged x=y } @test "[Basic end-to-end tests] Delete ClusterAdmissionPolicy" { - kubectl delete --wait -f $RESOURCES_DIR/privileged-pod-policy.yaml + delete_policy privileged-pod-policy.yaml - # I can update privileged pods now - kubectl label pod nginx-privileged x=y + # I can update privileged pods now + kubectl label pod nginx-privileged x=y } @test "[Basic end-to-end tests] Apply mutating psp-user-group AdmissionPolicy" { - apply_admission_policy $RESOURCES_DIR/psp-user-group-policy.yaml + apply_policy psp-user-group-policy.yaml - # Policy should mutate pods - kubectl run pause-user-group --image registry.k8s.io/pause - kubectl wait --for=condition=Ready pod pause-user-group - kubectl get pods pause-user-group -o json | jq -e ".spec.containers[].securityContext.runAsUser==1000" + # Policy should mutate pods + kubectl run pause-user-group --image registry.k8s.io/pause + kubectl wait --for=condition=Ready pod pause-user-group + kubectl get pods pause-user-group -o json | jq -e ".spec.containers[].securityContext.runAsUser==1000" - kubectl delete --wait -f $RESOURCES_DIR/psp-user-group-policy.yaml + delete_policy psp-user-group-policy.yaml } @test "[Basic end-to-end tests] Launch & scale second policy server" { - kubectl apply -f $RESOURCES_DIR/policy-server.yaml - kubectl wait policyserver e2e-tests --for=condition=ServiceReconciled + kubectl apply -f $RESOURCES_DIR/policy-server.yaml + wait_for policyserver e2e-tests --for=condition=ServiceReconciled - kubectl patch policyserver e2e-tests --type=merge -p '{"spec": {"replicas": 2}}' - wait_rollout -n kubewarden deployment/policy-server-e2e-tests + kubectl patch policyserver e2e-tests --type=merge -p '{"spec": {"replicas": 2}}' + wait_policyserver e2e-tests - kubectl delete -f 
$RESOURCES_DIR/policy-server.yaml + kubectl delete -f $RESOURCES_DIR/policy-server.yaml } diff --git a/tests/common.bash b/tests/common.bash deleted file mode 100644 index 1cfed72..0000000 --- a/tests/common.bash +++ /dev/null @@ -1,158 +0,0 @@ -#!/usr/bin/env bash - -bats_require_minimum_version 1.7.0 - -load "../helpers/bats-support/load.bash" -load "../helpers/bats-assert/load.bash" - -function kubectl() { - command kubectl --context $CLUSTER_CONTEXT "$@" -} - -function helm() { - command helm --kube-context $CLUSTER_CONTEXT "$@" -} - -# get_metrics policy-server-default -function get_metrics { - pod=$1 - ns=${2:-$NAMESPACE} - - kubectl delete pod curlpod --ignore-not-found - kubectl run curlpod -t -i --rm --image curlimages/curl:8.00.1 --restart=Never -- \ - --silent $pod.$ns.svc.cluster.local:8080/metrics -} - -# Upgrade helm chart, but won't install if it does not exist -function helm_up { - # set default version, can be overridden with parameters - case $1 in - 'kubewarden-controller') - def_version=$KUBEWARDEN_CONTROLLER_CHART_VERSION;; - 'kubewarden-defaults') - def_version=$KUBEWARDEN_DEFAULTS_CHART_VERSION;; - 'kubewarden-crds') - def_version=$KUBEWARDEN_CRDS_CHART_VERSION;; - esac - - helm upgrade --version "$def_version" --wait \ - --namespace $NAMESPACE --create-namespace \ - "${@:2}" $1 $KUBEWARDEN_CHARTS_LOCATION/$1 - - # kubewarden-defaults ignore wait param, so rollout status would fail without retry (does not exist yet) - # retry function requires full command, not a function - [ $1 = 'kubewarden-defaults' ] && retry "kubectl rollout status -n kubewarden deployment/policy-server-default" - return 0 -} - -function helm_rm { - helm uninstall --wait --namespace $NAMESPACE $1 -} - -# Install or upgrade helm chart -function helm_in { - helm_up $@ --install -} - -function kubewarden_remove { - helm list -n $NAMESPACE | grep '^kubewarden-defaults' && helm_rm kubewarden-defaults - helm list -n $NAMESPACE | grep '^kubewarden-controller' && helm_rm kubewarden-controller - helm list -n $NAMESPACE | grep '^kubewarden-crds' && helm_rm kubewarden-crds - return 0 -} - -function retry() { - local cmd=$1 - local tries=${2:-10} - local delay=${3:-30} - local i - - # export functions into bash -c env - [[ "$cmd" =~ get_metrics ]] && export -f get_metrics - - # Github runner is shared - we must use context for cluster commands - [[ "$cmd" == kubectl* ]] && cmd="${cmd/ / --context $CLUSTER_CONTEXT }" - [[ "$cmd" == helm* ]] && cmd="${cmd/ / --kube-context $CLUSTER_CONTEXT }" - - for ((i=1; i<=tries; i++)); do - timeout 25 bash -c "$cmd" && break || echo "RETRY #$i: $cmd" - [ $i -ne $tries ] && sleep $delay || { echo "Godot: $cmd"; false; } - done -} - -# Safe version of waiting for pods. 
Looks in kube-system ns by default -# Handles kube-api disconnects during upgrade -function wait_pods() { - local i output - for i in {1..20}; do - output=$(kubectl get pods --no-headers -o wide ${@:--n kubewarden} | grep -vw Completed || echo 'Fail') - grep -vE '([0-9]+)/\1 +Running' <<< $output || break - [ $i -ne 20 ] && sleep 30 || { echo "Godot: pods not running"; false; } - done -} - -# Safe version of waiting for nodes -# Handles kube-api disconnects during upgrade -function wait_nodes() { - local i output - for i in {1..20}; do - output=$(kubectl get nodes --no-headers ${@:-} || echo 'Fail') - grep -vE '\bReady\b' <<< $output || break - [ $i -ne 20 ] && sleep 30 || { echo "Godot: nodes not running"; false; } - done -} - -function wait_for () { kubectl wait --timeout=5m "$@"; } -function wait_rollout() { kubectl rollout status --timeout=5m "$@"; } - -# Wait for cluster to come up after reboot -function wait_cluster() { - retry "kubectl cluster-info" 20 30 - wait_nodes - wait_pods -} - -# Run kubectl action which should fail on pod privileged policy -function kubefail_privileged { - run kubectl "$@" - assert_failure 1 - assert_output --regexp '^Error.*: admission webhook.*denied the request.*container is not allowed$' -} - -function kubectl_apply_should_fail { - run kubectl apply -f $1 - assert_failure -} - -function kubectl_apply_should_fail_with_message { - run kubectl apply -f $1 - assert_failure - assert_output --partial "$2" -} - -function apply_cluster_admission_policy { - kubectl apply -f $1 - wait_for_cluster_admission_policy PolicyActive - wait_for_default_policy_server_rollout - wait_for_cluster_admission_policy PolicyUniquelyReachable -} - -function apply_admission_policy { - kubectl apply -f ${1:--} - wait_for_admission_policy PolicyActive - wait_for_default_policy_server_rollout - wait_for_admission_policy PolicyUniquelyReachable -} - -function wait_for_admission_policy { - wait_for --for=condition="$1" admissionpolicies --all -A -} - -function wait_for_cluster_admission_policy { - wait_for --for=condition="$1" clusteradmissionpolicies --all -} - -function wait_for_default_policy_server_rollout { - revision=$(kubectl -n $NAMESPACE get "deployment/policy-server-default" -o json | jq -er '.metadata.annotations."deployment.kubernetes.io/revision"') - wait_rollout -n $NAMESPACE --revision $revision "deployment/policy-server-default" -} diff --git a/tests/context-aware-requests-tests.bats b/tests/context-aware-requests-tests.bats index 638be26..bcdfb11 100644 --- a/tests/context-aware-requests-tests.bats +++ b/tests/context-aware-requests-tests.bats @@ -1,27 +1,28 @@ #!/usr/bin/env bats setup() { - load common.bash - wait_pods + load ../helpers/helpers.sh + wait_pods } teardown_file() { - kubectl delete pods --all - kubectl delete clusteradmissionpolicies --all + load ../helpers/helpers.sh + kubectl delete pods --all + kubectl delete admissionpolicies,clusteradmissionpolicies --all -A } # Same as in basic e2e tests? 
@test "[Context Aware Policy tests] Test mutating a Pod" { - apply_cluster_admission_policy $RESOURCES_DIR/context-aware-policy.yaml + apply_policy context-aware-policy.yaml - # Create Pod with the right annotation - kubectl create ns ctx-test - kubectl annotate namespaces ctx-test propagate.hello=world + # Create Pod with the right annotation + kubectl create ns ctx-test + kubectl annotate namespaces ctx-test propagate.hello=world - kubectl run --namespace ctx-test pause-user-group --image registry.k8s.io/pause - kubectl wait --for=condition=Ready pod --namespace ctx-test pause-user-group - kubectl get pod --namespace ctx-test pause-user-group -o json | jq -e '.metadata.labels["hello"]=="world"' - kubectl delete namespace ctx-test + kubectl run --namespace ctx-test pause-user-group --image registry.k8s.io/pause + kubectl wait --for=condition=Ready pod --namespace ctx-test pause-user-group + kubectl get pod --namespace ctx-test pause-user-group -o json | jq -e '.metadata.labels["hello"]=="world"' + kubectl delete namespace ctx-test - kubectl delete -f $RESOURCES_DIR/context-aware-policy.yaml + delete_policy context-aware-policy.yaml } diff --git a/tests/monitor-mode-tests.bats b/tests/monitor-mode-tests.bats index 98d46c1..a3c5e0b 100644 --- a/tests/monitor-mode-tests.bats +++ b/tests/monitor-mode-tests.bats @@ -1,35 +1,38 @@ #!/usr/bin/env bats setup() { - load common.bash - wait_pods + load ../helpers/helpers.sh + wait_pods } teardown_file() { - kubectl delete pods --all - kubectl delete clusteradmissionpolicies --all + load ../helpers/helpers.sh + kubectl delete pods --all + kubectl delete admissionpolicies,clusteradmissionpolicies --all -A } @test "[Monitor mode end-to-end tests] Install ClusterAdmissionPolicy in monitor mode" { - apply_cluster_admission_policy $RESOURCES_DIR/privileged-pod-policy-monitor.yaml + apply_policy privileged-pod-policy-monitor.yaml } @test "[Monitor mode end-to-end tests] Monitor mode should only log event" { - kubectl run nginx-privileged --image=nginx:alpine --privileged - run kubectl logs -n $NAMESPACE -lapp="kubewarden-policy-server-default" - assert_output -p "policy evaluation (monitor mode)" - assert_output -p "allowed: false" - assert_output -p "Privileged container is not allowed" - kubectl delete pod nginx-privileged + kubectl run nginx-privileged --image=nginx:alpine --privileged + run kubectl logs -n $NAMESPACE -lapp="kubewarden-policy-server-default" + assert_output -p "policy evaluation (monitor mode)" + assert_output -p "allowed: false" + assert_output -p "Privileged container is not allowed" + kubectl delete pod nginx-privileged } @test "[Monitor mode end-to-end tests] Transition to protect should block events" { - apply_cluster_admission_policy $RESOURCES_DIR/privileged-pod-policy.yaml + apply_policy privileged-pod-policy.yaml - # Launch privileged pod (should fail) - kubefail_privileged run pod-privileged --image=registry.k8s.io/pause --privileged + # Launch privileged pod (should fail) + kubefail_privileged run pod-privileged --image=registry.k8s.io/pause --privileged } @test "[Monitor mode end-to-end tests] Transition from protect to monitor should be disallowed" { - kubectl_apply_should_fail_with_message $RESOURCES_DIR/privileged-pod-policy-monitor.yaml "field cannot transition from protect to monitor. Recreate instead." + run kubectl apply -f "$RESOURCES_DIR/policies/privileged-pod-policy-monitor.yaml" + assert_failure + assert_output --partial "field cannot transition from protect to monitor. Recreate instead." 
} diff --git a/tests/mutating-requests-tests.bats b/tests/mutating-requests-tests.bats index 46d5ba6..6cc23dc 100644 --- a/tests/mutating-requests-tests.bats +++ b/tests/mutating-requests-tests.bats @@ -1,35 +1,36 @@ #!/usr/bin/env bats setup() { - load common.bash - wait_pods + load ../helpers/helpers.sh + wait_pods } teardown_file() { - kubectl delete pods --all - kubectl delete clusteradmissionpolicies --all + load ../helpers/helpers.sh + kubectl delete pods --all + kubectl delete admissionpolicies,clusteradmissionpolicies --all -A } # Same as in basic e2e tests? @test "[Mutation request tests] Test psp-user-group policy with mutating flag enabled" { - apply_cluster_admission_policy $RESOURCES_DIR/mutate-policy-with-flag-enabled.yaml + apply_policy mutate-policy-with-flag-enabled.yaml - # New pod should be mutated by the policy - kubectl run pause-user-group --image registry.k8s.io/pause - kubectl wait --for=condition=Ready pod pause-user-group - kubectl get pod pause-user-group -o json | jq -e ".spec.containers[].securityContext.runAsUser==1000" - kubectl delete pod pause-user-group + # New pod should be mutated by the policy + kubectl run pause-user-group --image registry.k8s.io/pause + kubectl wait --for=condition=Ready pod pause-user-group + kubectl get pod pause-user-group -o json | jq -e ".spec.containers[].securityContext.runAsUser==1000" + kubectl delete pod pause-user-group - kubectl delete -f $RESOURCES_DIR/mutate-policy-with-flag-enabled.yaml + delete_policy mutate-policy-with-flag-enabled.yaml } @test "[Mutation request tests] Test psp-user-group policy with mutating flag disabled" { - apply_cluster_admission_policy $RESOURCES_DIR/mutate-policy-with-flag-disabled.yaml + apply_policy mutate-policy-with-flag-disabled.yaml - # New pod should be rejected by psp-user-group-policy - run kubectl run pause-user-group --image registry.k8s.io/pause - assert_failure - assert_output --partial "The policy attempted to mutate the request, but it is currently configured to not allow mutations" + # New pod should be rejected by psp-user-group-policy + run kubectl run pause-user-group --image registry.k8s.io/pause + assert_failure + assert_output --partial "The policy attempted to mutate the request, but it is currently configured to not allow mutations" - kubectl delete -f $RESOURCES_DIR/mutate-policy-with-flag-disabled.yaml + delete_policy mutate-policy-with-flag-disabled.yaml } diff --git a/tests/namespaced-admission-policy-tests.bats b/tests/namespaced-admission-policy-tests.bats index 6ed4cf0..36a8538 100644 --- a/tests/namespaced-admission-policy-tests.bats +++ b/tests/namespaced-admission-policy-tests.bats @@ -1,49 +1,50 @@ #!/usr/bin/env bats setup() { - load common.bash - wait_pods + load ../helpers/helpers.sh + wait_pods } teardown_file() { - kubectl delete pods --all - kubectl delete -f $RESOURCES_DIR/policy-pod-privileged.yaml --ignore-not-found + load ../helpers/helpers.sh + kubectl delete pods --all + kubectl delete admissionpolicies,clusteradmissionpolicies --all -A } @test "[Namespaced AdmissionPolicy] Test AdmissionPolicy in default NS" { - apply_admission_policy $RESOURCES_DIR/policy-pod-privileged.yaml + apply_policy policy-pod-privileged.yaml - # Privileged pod in the default namespace (should fail) - kubefail_privileged run nginx-privileged --image=nginx:alpine --privileged + # Privileged pod in the default namespace (should fail) + kubefail_privileged run nginx-privileged --image=nginx:alpine --privileged - # Privileged pod in the kubewarden namespace (should work) - 
kubectl run nginx-privileged --image=nginx:alpine --privileged -n kubewarden - kubectl wait --for=condition=Ready pod nginx-privileged -n kubewarden - kubectl delete pod nginx-privileged -n kubewarden + # Privileged pod in the kubewarden namespace (should work) + kubectl run nginx-privileged --image=nginx:alpine --privileged -n $NAMESPACE + kubectl wait --for=condition=Ready pod nginx-privileged -n $NAMESPACE + kubectl delete pod nginx-privileged -n $NAMESPACE - # Unprivileged pod in default namespace (should work) - kubectl run nginx-unprivileged --image=nginx:alpine - kubectl wait --for=condition=Ready pod nginx-unprivileged - kubectl delete pod nginx-unprivileged + # Unprivileged pod in default namespace (should work) + kubectl run nginx-unprivileged --image=nginx:alpine + kubectl wait --for=condition=Ready pod nginx-unprivileged + kubectl delete pod nginx-unprivileged } @test "[Namespaced AdmissionPolicy] Update policy to check only UPDATE operations" { - yq '.spec.rules[0].operations = ["UPDATE"]' $RESOURCES_DIR/policy-pod-privileged.yaml | kubectl apply -f - + yq '.spec.rules[0].operations = ["UPDATE"]' $RESOURCES_DIR/policies/policy-pod-privileged.yaml | kubectl apply -f - - # I can create privileged pods now - kubectl run nginx-privileged --image=nginx:alpine --privileged + # I can create privileged pods now + kubectl run nginx-privileged --image=nginx:alpine --privileged - # I still can not update privileged pods - kubefail_privileged label pod nginx-privileged x=y - kubectl delete pod nginx-privileged + # I still can not update privileged pods + kubefail_privileged label pod nginx-privileged x=y + kubectl delete pod nginx-privileged } @test "[Namespaced AdmissionPolicy] Delete AdmissionPolicy to check restrictions are removed" { - kubectl delete -f $RESOURCES_DIR/policy-pod-privileged.yaml + delete_policy policy-pod-privileged.yaml - # I can create privileged pods - kubectl run nginx-privileged --image=nginx:alpine --privileged + # I can create privileged pods + kubectl run nginx-privileged --image=nginx:alpine --privileged - # I can update privileged pods - kubectl label pod nginx-privileged x=y + # I can update privileged pods + kubectl label pod nginx-privileged x=y } diff --git a/tests/opentelemetry-tests.bats b/tests/opentelemetry-tests.bats index 57426d0..c5619e2 100644 --- a/tests/opentelemetry-tests.bats +++ b/tests/opentelemetry-tests.bats @@ -5,10 +5,35 @@ # kubectl port-forward -n jaeger svc/my-open-telemetry-query 16686:16686 setup() { - load common.bash - wait_pods -n kube-system + load ../helpers/helpers.sh + wait_pods -n kube-system } +teardown_file() { + load ../helpers/helpers.sh + kubectl delete admissionpolicies,clusteradmissionpolicies --all -A + kubectl delete pod nginx-privileged nginx-unprivileged --ignore-not-found + + # Remove installed apps + helm uninstall --wait -n jaeger jaeger-operator + helm uninstall --wait -n prometheus prometheus + helm uninstall --wait -n open-telemetry my-opentelemetry-operator + helm uninstall --wait -n cert-manager cert-manager + + helmer reset controller +} + +# get_metrics policy-server-default +function get_metrics { + pod=$1 + ns=${2:-$NAMESPACE} + + kubectl delete pod curlpod --ignore-not-found + kubectl run curlpod -t -i --rm --image curlimages/curl:8.10.1 --restart=Never -- \ + --silent $pod.$ns.svc.cluster.local:8080/metrics +} +export -f get_metrics # required by retry command + @test "[OpenTelemetry] Install OpenTelemetry, Prometheus, Jaeger" { # Required by OpenTelemetry helm repo add jetstack 
diff --git a/tests/opentelemetry-tests.bats b/tests/opentelemetry-tests.bats
index 57426d0..c5619e2 100644
--- a/tests/opentelemetry-tests.bats
+++ b/tests/opentelemetry-tests.bats
@@ -5,10 +5,35 @@
# kubectl port-forward -n jaeger svc/my-open-telemetry-query 16686:16686
setup() {
-    load common.bash
-    wait_pods -n kube-system
+    load ../helpers/helpers.sh
+    wait_pods -n kube-system
}
+teardown_file() {
+    load ../helpers/helpers.sh
+    kubectl delete admissionpolicies,clusteradmissionpolicies --all -A
+    kubectl delete pod nginx-privileged nginx-unprivileged --ignore-not-found
+
+    # Remove installed apps
+    helm uninstall --wait -n jaeger jaeger-operator
+    helm uninstall --wait -n prometheus prometheus
+    helm uninstall --wait -n open-telemetry my-opentelemetry-operator
+    helm uninstall --wait -n cert-manager cert-manager
+
+    helmer reset controller
+}
+
+# get_metrics policy-server-default
+function get_metrics {
+    pod=$1
+    ns=${2:-$NAMESPACE}
+
+    kubectl delete pod curlpod --ignore-not-found
+    kubectl run curlpod -t -i --rm --image curlimages/curl:8.10.1 --restart=Never -- \
+        --silent $pod.$ns.svc.cluster.local:8080/metrics
+}
+export -f get_metrics # required by retry command
+
@test "[OpenTelemetry] Install OpenTelemetry, Prometheus, Jaeger" {
    # Required by OpenTelemetry
    helm repo add jetstack https://charts.jetstack.io --force-update
@@ -24,9 +49,9 @@ setup() {
    # Prometheus
    helm repo add --force-update prometheus-community https://prometheus-community.github.io/helm-charts
-    helm upgrade -i --wait prometheus prometheus-community/kube-prometheus-stack \
+    helm upgrade -i --wait prometheus prometheus-community/kube-prometheus-stack \
        -n prometheus --create-namespace \
-        --values $RESOURCES_DIR/opentelemetry-prometheus-values.yaml
+        --values $RESOURCES_DIR/opentelemetry-prometheus.yaml
    # Jaeger
    helm repo add --force-update jaegertracing https://jaegertracing.github.io/helm-charts
@@ -38,20 +63,20 @@ setup() {
    wait_pods -n jaeger
    # Setup Kubewarden
-    helm_up kubewarden-controller --values $RESOURCES_DIR/opentelemetry-kw-telemetry-values.yaml
-    helm_up kubewarden-defaults --set "recommendedPolicies.enabled=True"
+    helmer set kubewarden-controller --values $RESOURCES_DIR/opentelemetry-telemetry.yaml
+    helmer set kubewarden-defaults --set recommendedPolicies.enabled=True
}
@test "[OpenTelemetry] Kubewarden containers have sidecars & metrics" {
    # Controller is restarted to get sidecar
-    wait_pods -n kubewarden
+    wait_pods -n $NAMESPACE
    # Check all pods have sidecar (otc-container) - might take a minute to start
    retry "kubectl get pods -n kubewarden --field-selector=status.phase==Running -o json | jq -e '[.items[].spec.containers[1].name == \"otc-container\"] | all'"
    # Policy server service has the metrics ports
-    kubectl get services -n kubewarden policy-server-default -o json | jq -e '[.spec.ports[].name == "metrics"] | any'
+    kubectl get services -n $NAMESPACE policy-server-default -o json | jq -e '[.spec.ports[].name == "metrics"] | any'
    # Controller service has the metrics ports
-    kubectl get services -n kubewarden kubewarden-controller-metrics-service -o json | jq -e '[.spec.ports[].name == "metrics"] | any'
+    kubectl get services -n $NAMESPACE kubewarden-controller-metrics-service -o json | jq -e '[.spec.ports[].name == "metrics"] | any'
    # Generate metric data
    kubectl run pod-privileged --image=registry.k8s.io/pause --privileged
@@ -73,40 +98,24 @@ setup() {
    kubectl wait --for=condition=Ready pod nginx-privileged
    # Deploy some policy
-    kubectl apply -f $RESOURCES_DIR/privileged-pod-policy.yaml
-    apply_cluster_admission_policy $RESOURCES_DIR/namespace-label-propagator-policy.yaml
-
-    run kubectl create job --from=cronjob/audit-scanner testing --namespace $NAMESPACE
-    assert_output -p "testing created"
-    kubectl wait --for=condition="Complete" job testing --namespace $NAMESPACE
+    apply_policy --no-wait privileged-pod-policy.yaml
+    apply_policy namespace-label-propagator-policy.yaml
+    trigger_audit_scan
    retry 'test $(get_metrics policy-server-default | grep protect | grep -oE "policy_name=\"[^\"]+" | sort -u | wc -l) -eq 2'
}
@test "[OpenTelemetry] Disabling telemetry should remove sidecars & metrics" {
-    helm_up kubewarden-controller --set "telemetry.enabled=False"
-    helm_up kubewarden-defaults --set "recommendedPolicies.enabled=True"
-    wait_pods -n kubewarden
+    helmer set kubewarden-controller \
+        --set telemetry.metrics.enabled=False \
+        --set telemetry.tracing.enabled=False
+    helmer set kubewarden-defaults --set recommendedPolicies.enabled=False
+    wait_pods -n $NAMESPACE
    # Check sidecars (otc-container) - have been removed
    retry "kubectl get pods -n kubewarden -o json | jq -e '[.items[].spec.containers[1].name != \"otc-container\"] | all'"
    # Policy server service has no metrics ports
-    kubectl get services -n kubewarden policy-server-default -o json | jq -e '[.spec.ports[].name != "metrics"] | all'
+    kubectl get services -n $NAMESPACE policy-server-default -o json | jq -e '[.spec.ports[].name != "metrics"] | all'
    # Controller service has no metrics ports
-    kubectl get services -n kubewarden kubewarden-controller-metrics-service -o json | jq -e '[.spec.ports[].name != "metrics"] | all'
-}
-
-teardown_file() {
-    load common.bash
-    # Remove installed apps
-    helm uninstall --wait -n jaeger jaeger-operator
-    helm uninstall --wait -n prometheus prometheus
-    helm uninstall --wait -n open-telemetry my-opentelemetry-operator
-    helm uninstall --wait -n cert-manager cert-manager
-
-    # Resources might be already deleted by helm update
-    kubectl delete -f $RESOURCES_DIR/privileged-pod-policy.yaml --ignore-not-found
-    kubectl delete -f $RESOURCES_DIR/namespace-label-propagator-policy.yaml --ignore-not-found
-    kubectl delete pod nginx-privileged nginx-unprivileged --ignore-not-found
-    kubectl delete jobs -n kubewarden testing --ignore-not-found
+    kubectl get services -n $NAMESPACE kubewarden-controller-metrics-service -o json | jq -e '[.spec.ports[].name != "metrics"] | all'
}
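The `get_metrics` helper added above just curls the policy-server metrics endpoint from a throwaway pod; exporting it lets `retry` re-evaluate the expression in a subshell. The same check can be reproduced by hand, assuming the namespace and port used by the helper:

```bash
# One-off equivalent of the metrics assertion used in the audit-scan check above.
kubectl run curlpod -t -i --rm --image curlimages/curl:8.10.1 --restart=Never -- \
    --silent policy-server-default.kubewarden.svc.cluster.local:8080/metrics \
    | grep -oE 'policy_name="[^"]+' | sort -u
```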
diff --git a/tests/private-registry-tests.bats b/tests/private-registry-tests.bats
index dc02fd6..3ee6992 100644
--- a/tests/private-registry-tests.bats
+++ b/tests/private-registry-tests.bats
@@ -2,104 +2,106 @@
# https://github.com/kubewarden/kubewarden-controller/pull/421
setup() {
-    load common.bash
-    wait_pods
+    load ../helpers/helpers.sh
+    wait_pods
-    FQDN=$(k3d node get k3d-$CLUSTER_NAME-server-0 -o json | jq -r 'first.IP.IP').nip.io
-    REGISTRY=$FQDN:30707
-    PUB_POLICY=registry://ghcr.io/kubewarden/tests/pod-privileged:v0.2.5
-    PRIV_POLICY=registry://$REGISTRY/kubewarden/tests/pod-privileged:v0.2.5
+    # FQDN=$(k3d node get k3d-$CLUSTER_NAME-server-0 -o json | jq -r 'first.IP.IP').nip.io
+    FQDN=$(kubectl get nodes -l 'node-role.kubernetes.io/control-plane' -o custom-columns=INTERNAL-IP:.status.addresses[0].address --no-headers | tail -1).nip.io
+
+    REGISTRY=$FQDN:30707
+    PUB_POLICY=registry://ghcr.io/kubewarden/tests/pod-privileged:v0.2.5
+    PRIV_POLICY=registry://$REGISTRY/kubewarden/tests/pod-privileged:v0.2.5
}
teardown_file() {
-    load common.bash
-    kubectl delete clusteradmissionpolicies private-pod-privileged ||:
-
-    helm_up kubewarden-defaults --reuse-values \
-        --set policyServer.imagePullSecret=null \
-        --set policyServer.sourceAuthorities=null
-    # Can't delete secret - https://github.com/kubewarden/policy-server/issues/459
-    # kubectl --namespace kubewarden delete secret secret-registry-docker ||:
-
-    kubectl delete -f $RESOURCES_DIR/private-registry-deploy.yaml ||:
-    kubectl delete cm registry-auth ||:
-    kubectl delete secret registry-cert ||:
+    load ../helpers/helpers.sh
+    kubectl delete admissionpolicies,clusteradmissionpolicies --all -A
+
+    helmer set kubewarden-defaults \
+        --set policyServer.imagePullSecret=null \
+        --set policyServer.sourceAuthorities=null
+    # Can't delete secret - https://github.com/kubewarden/policy-server/issues/459
+    # kubectl --namespace kubewarden delete secret secret-registry-docker ||:
+
+    kubectl delete -f $RESOURCES_DIR/private-registry-deploy.yaml --ignore-not-found
+    kubectl delete cm registry-auth --ignore-not-found
+    kubectl delete secret registry-cert --ignore-not-found
}
# https://www.baeldung.com/openssl-self-signed-cert
@test "[Private Registry] Generate certificates" {
-    certdir="$BATS_RUN_TMPDIR/certs/"
-    mkdir $certdir && cd $certdir
-
-    # Create CA
-    openssl req -nodes -batch -x509 -sha256 -days 365 -newkey rsa:2048 -keyout rootCA.key -out rootCA.crt
-    # Create CSR
-    openssl req -nodes -batch -newkey rsa:2048 -keyout domain.key -out domain.csr \
-        -addext "subjectAltName = DNS:$FQDN"
-    # Create CRT
-    openssl x509 -req -CA rootCA.crt -CAkey rootCA.key -in domain.csr -out domain.crt -days 365 -CAcreateserial \
-        -extfile <(echo "subjectAltName=DNS:$FQDN")
-    # Print CRT
-    openssl x509 -text -noout -in domain.crt
-
-    cd -
+    certdir="$BATS_RUN_TMPDIR/certs/"
+    mkdir $certdir && cd $certdir
+
+    # Create CA
+    openssl req -nodes -batch -x509 -sha256 -days 365 -newkey rsa:2048 -keyout rootCA.key -out rootCA.crt
+    # Create CSR
+    openssl req -nodes -batch -newkey rsa:2048 -keyout domain.key -out domain.csr \
+        -addext "subjectAltName = DNS:$FQDN"
+    # Create CRT
+    openssl x509 -req -CA rootCA.crt -CAkey rootCA.key -in domain.csr -out domain.crt -days 365 -CAcreateserial \
+        -extfile <(echo "subjectAltName=DNS:$FQDN")
+    # Print CRT
+    openssl x509 -text -noout -in domain.crt
+
+    cd -
}
# https://medium.com/geekculture/deploying-docker-registry-on-kubernetes-3319622b8f32
@test "[Private Registry] Generate AUTH and start registry" {
-    certdir="$BATS_RUN_TMPDIR/certs/"
+    certdir="$BATS_RUN_TMPDIR/certs/"
-    # Create configmap from htpasswd
-    # docker run --entrypoint htpasswd httpd:2 -Bbn testuser testpassword
-    kubectl create cm registry-auth \
-        --from-literal htpasswd='testuser:$2y$05$bkWZdztgNvW.akipcacKb.nueDup8NGbcTtvqDKG.3keAgUDufapm'
+    # Create configmap from htpasswd
+    # docker run --entrypoint htpasswd httpd:2 -Bbn testuser testpassword
+    kubectl create cm registry-auth \
+        --from-literal htpasswd='testuser:$2y$05$bkWZdztgNvW.akipcacKb.nueDup8NGbcTtvqDKG.3keAgUDufapm'
-    # Create secret with certificates
-    kubectl create secret tls registry-cert \
-        --cert=$certdir/domain.crt --key=$certdir/domain.key
+    # Create secret with certificates
+    kubectl create secret tls registry-cert \
+        --cert=$certdir/domain.crt --key=$certdir/domain.key
-    kubectl apply -f $RESOURCES_DIR/private-registry-deploy.yaml
-    wait_rollout 'deploy/registry'
+    kubectl apply -f $RESOURCES_DIR/private-registry-deploy.yaml
+    wait_rollout 'deploy/registry'
}
@test "[Private Registry] Pull & Push policy to registry" {
-    jq -n --arg r $REGISTRY \
-        '{"auths": {($r): {"auth": "dGVzdHVzZXI6dGVzdHBhc3N3b3Jk"}}}' > "$BATS_RUN_TMPDIR/config.json"
-    jq -n --arg r $REGISTRY --arg crt "$BATS_RUN_TMPDIR/certs/rootCA.crt" \
-        '{"source_authorities":{($r):[{"type":"Path","path":$crt}]}}' > "$BATS_RUN_TMPDIR/sources.json"
-
-    kwctl pull $PUB_POLICY
-    kwctl push $PUB_POLICY $PRIV_POLICY \
-        --docker-config-json-path $BATS_RUN_TMPDIR \
-        --sources-path "$BATS_RUN_TMPDIR/sources.json"
+    jq -n --arg r $REGISTRY \
+        '{"auths": {($r): {"auth": "dGVzdHVzZXI6dGVzdHBhc3N3b3Jk"}}}' > "$BATS_RUN_TMPDIR/config.json"
+    jq -n --arg r $REGISTRY --arg crt "$BATS_RUN_TMPDIR/certs/rootCA.crt" \
+        '{"source_authorities":{($r):[{"type":"Path","path":$crt}]}}' > "$BATS_RUN_TMPDIR/sources.json"
+
+    kwctl pull $PUB_POLICY
+    kwctl push $PUB_POLICY $PRIV_POLICY \
+        --docker-config-json-path $BATS_RUN_TMPDIR \
+        --sources-path "$BATS_RUN_TMPDIR/sources.json"
}
# https://docs.kubewarden.io/operator-manual/policy-servers/private-registry
@test "[Private Registry] Set up policy server access to registry" {
-    # Create secret to access registry
-    kubectl --namespace kubewarden create secret docker-registry secret-registry-docker \
-        --docker-username=testuser \
-        --docker-password=testpassword \
-        --docker-server=$REGISTRY
-
-    # Edit default policy server config
-    helm_up kubewarden-defaults --reuse-values \
-        --set policyServer.imagePullSecret=secret-registry-docker \
-        --set policyServer.sourceAuthorities[0].uri="$REGISTRY" \
-        --set-file policyServer.sourceAuthorities[0].certs[0]="$BATS_RUN_TMPDIR/certs/rootCA.crt"
-
-    helm get values -n kubewarden kubewarden-defaults
+    # Create secret to access registry
+    kubectl --namespace kubewarden create secret docker-registry secret-registry-docker \
+        --docker-username=testuser \
+        --docker-password=testpassword \
+        --docker-server=$REGISTRY
+
+    # Edit default policy server config
+    helmer set kubewarden-defaults \
+        --set policyServer.imagePullSecret=secret-registry-docker \
+        --set policyServer.sourceAuthorities[0].uri="$REGISTRY" \
+        --set-file policyServer.sourceAuthorities[0].certs[0]="$BATS_RUN_TMPDIR/certs/rootCA.crt"
+
+    helm get values -n $NAMESPACE kubewarden-defaults
}
@test "[Private Registry] Check I can deploy policy from auth registry" {
-    policy="$BATS_RUN_TMPDIR/private-policy.yaml"
+    policy="$BATS_RUN_TMPDIR/private-policy.yaml"
-    kwctl scaffold manifest --type=ClusterAdmissionPolicy $PUB_POLICY |\
-        yq '.metadata.name = "private-pod-privileged"' |\
-        PP=$PRIV_POLICY yq '.spec.module = strenv(PP)' > $policy
+    kwctl scaffold manifest --type=ClusterAdmissionPolicy $PUB_POLICY |\
+        yq '.metadata.name = "private-pod-privileged"' |\
+        PP=$PRIV_POLICY yq '.spec.module = strenv(PP)' > $policy
-    # Make sure we use private registry
-    grep -F "module: registry://$REGISTRY" $policy
-    apply_cluster_admission_policy $policy
-    kubectl get clusteradmissionpolicies private-pod-privileged -o json | jq -e '.status.policyStatus == "active"'
+    # Make sure we use private registry
+    grep -F "module: registry://$REGISTRY" $policy
+    apply_policy $policy
+    kubectl get clusteradmissionpolicies private-pod-privileged -o json | jq -e '.status.policyStatus == "active"'
}
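A quick way to confirm the push above actually landed in the self-signed registry is to pull the private copy back with the same auth and source-authority files; the flags mirror the ones passed to `kwctl push`, and whether `pull` needs both of them here is an assumption:

```bash
# Sanity check outside the test: pull the pushed policy back through the
# registry that uses the self-signed CA and htpasswd auth configured above.
kwctl pull "registry://$REGISTRY/kubewarden/tests/pod-privileged:v0.2.5" \
    --docker-config-json-path "$BATS_RUN_TMPDIR" \
    --sources-path "$BATS_RUN_TMPDIR/sources.json"
kwctl policies | grep "$REGISTRY"
```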
diff --git a/tests/reconfiguration-tests.bats b/tests/reconfiguration-tests.bats
index ff2954f..939f6cb 100644
--- a/tests/reconfiguration-tests.bats
+++ b/tests/reconfiguration-tests.bats
@@ -1,34 +1,34 @@
#!/usr/bin/env bats
setup() {
-    load common.bash
-    wait_pods
+    load ../helpers/helpers.sh
+    wait_pods
}
teardown_file() {
-    kubectl delete pods --all
-    kubectl delete admissionpolicies --all -A
-    kubectl delete clusteradmissionpolicies --all
+    load ../helpers/helpers.sh
+    kubectl delete pods --all
+    kubectl delete admissionpolicies,clusteradmissionpolicies --all -A
}
@test "[Reconfiguration tests] Apply pod-privileged policy" {
-    apply_cluster_admission_policy $RESOURCES_DIR/privileged-pod-policy.yaml
+    apply_policy privileged-pod-policy.yaml
}
@test "[Reconfiguration tests] Reconfigure Kubewarden stack" {
-    helm_up kubewarden-controller --values=$RESOURCES_DIR/reconfiguration-values.yaml --reuse-values
-    wait_for_cluster_admission_policy PolicyActive
+    helmer set kubewarden-controller --values=$RESOURCES_DIR/reconfiguration-values.yaml
+    wait_for --for=condition="PolicyActive" clusteradmissionpolicies --all
}
@test "[Reconfiguration tests] Apply psp-user-group policy" {
-    apply_admission_policy $RESOURCES_DIR/psp-user-group-policy.yaml
+    apply_policy psp-user-group-policy.yaml
}
@test "[Reconfiguration tests] Test that pod-privileged policy works" {
-    # Launch unprivileged pod
-    kubectl run pause-unprivileged --image registry.k8s.io/pause
-    kubectl wait --for=condition=Ready pod pause-unprivileged
+    # Launch unprivileged pod
+    kubectl run pause-unprivileged --image registry.k8s.io/pause
+    kubectl wait --for=condition=Ready pod pause-unprivileged
-    # Launch privileged pod (should fail)
-    kubefail_privileged run pause-privileged --image registry.k8s.io/pause --privileged
+    # Launch privileged pod (should fail)
+    kubefail_privileged run pause-privileged --image registry.k8s.io/pause --privileged
}
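`wait_for` replaces the old `wait_for_cluster_admission_policy` helper; judging by the call above it is a thin wrapper around `kubectl wait` from the shared helpers (the timeout value is an assumption):

```bash
# Hypothetical shape of the wait_for helper used by the reconfiguration test.
wait_for() {
    kubectl wait --timeout=5m "$@"
}

# Usage as in the test above:
wait_for --for=condition="PolicyActive" clusteradmissionpolicies --all
```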
diff --git a/tests/secure-supply-chain-tests.bats b/tests/secure-supply-chain-tests.bats
index d9aac9b..fe461b9 100644
--- a/tests/secure-supply-chain-tests.bats
+++ b/tests/secure-supply-chain-tests.bats
@@ -12,66 +12,67 @@ CONFIGMAP_NAME="ssc-verification-config"
setup() {
-    load common.bash
-    wait_pods
+    load ../helpers/helpers.sh
+    wait_pods
}
teardown_file() {
-    load common.bash
-    helm_up kubewarden-defaults --set policyServer.verificationConfig=""
-    kubectl delete configmap -n $NAMESPACE $CONFIGMAP_NAME --ignore-not-found
+    load ../helpers/helpers.sh
+    kubectl delete admissionpolicies,clusteradmissionpolicies --all -A
+    helmer set kubewarden-defaults --set policyServer.verificationConfig="" # TODO =null - https://github.com/kubewarden/kubewarden-controller/issues/903
+    kubectl delete configmap -n $NAMESPACE $CONFIGMAP_NAME --ignore-not-found
}
function create_configmap {
-    kubectl -n $NAMESPACE delete configmap $CONFIGMAP_NAME --ignore-not-found
-    kubectl -n $NAMESPACE create configmap $CONFIGMAP_NAME --from-file=verification-config=$1
+    kubectl -n $NAMESPACE delete configmap $CONFIGMAP_NAME --ignore-not-found
+    kubectl -n $NAMESPACE create configmap $CONFIGMAP_NAME --from-file=verification-config=$1
}
function get_policy_server_status {
-    # get latest policy-server pod
-    local podname=$(kubectl get pods -n kubewarden --selector=app=kubewarden-policy-server-default --sort-by=.metadata.creationTimestamp -o jsonpath="{.items[-1].metadata.name}")
-    # fill output with logs, 10s timeout because pod restart cleans up
-    kubectl logs -n kubewarden $podname --request-timeout=10s -f
+    # get latest policy-server pod
+    local podname=$(kubectl get pods -n kubewarden --selector=app=kubewarden-policy-server-default --sort-by=.metadata.creationTimestamp -o jsonpath="{.items[-1].metadata.name}")
+    # fill output with logs, 10s timeout because pod restart cleans up
+    kubectl logs -n $NAMESPACE $podname --request-timeout=10s -f
-    # fill exit code with pod status
-    kubectl get pod -n kubewarden $podname -o json | jq -e '.status.containerStatuses[0].ready == true'
+    # fill exit code with pod status
+    kubectl get pod -n $NAMESPACE $podname -o json | jq -e '.status.containerStatuses[0].ready == true'
}
# Configure kubewarden to check policy signatures
# https://docs.kubewarden.io/distributing-policies/secure-supply-chain#configuring-the-policy-server-to-check-policy-signatures
@test "[Secure Supply Chain tests] Enable" {
-    # policyserver needs configmap to start in verification mode
-    create_configmap <(kwctl scaffold verification-config)
-    helm_up kubewarden-defaults --set policyServer.verificationConfig=$CONFIGMAP_NAME
-    kubectl get policyserver default -o json | jq -e --arg cmname $CONFIGMAP_NAME '.spec.verificationConfig == $cmname'
+    # policyserver needs configmap to start in verification mode
+    create_configmap <(kwctl scaffold verification-config)
+    helmer set kubewarden-defaults --set policyServer.verificationConfig=$CONFIGMAP_NAME
+    kubectl get policyserver default -o json | jq -e --arg cmname $CONFIGMAP_NAME '.spec.verificationConfig == $cmname'
}
@test "[Secure Supply Chain tests] Trusted policy should not block policy server" {
-    create_configmap $RESOURCES_DIR/secure-supply-chain-cm.yaml
+    create_configmap $RESOURCES_DIR/secure-supply-chain-cm.yaml
-    # Policy Server should start fine
-    apply_admission_policy $RESOURCES_DIR/policy-pod-privileged.yaml
+    # Policy Server should start fine
+    apply_policy policy-pod-privileged.yaml
-    # Check logs of last policyserver pod
-    run -0 get_policy_server_status
-    assert_output -p 'verifying policy authenticity and integrity using sigstore'
-    assert_output -p 'Local file checksum verification passed'
+    # Check logs of last policyserver pod
+    run -0 get_policy_server_status
+    assert_output -p 'verifying policy authenticity and integrity using sigstore'
+    assert_output -p 'Local file checksum verification passed'
-    kubectl delete -f $RESOURCES_DIR/policy-pod-privileged.yaml
+    delete_policy policy-pod-privileged.yaml
}
@test "[Secure Supply Chain tests] Untrusted policy should block policy server to run" {
-    create_configmap $RESOURCES_DIR/secure-supply-chain-cm-restricted.yaml
+    create_configmap $RESOURCES_DIR/secure-supply-chain-cm-restricted.yaml
-    # Policy Server startup should fail
-    kubectl apply -f $RESOURCES_DIR/policy-pod-privileged.yaml
-    run kubectl -n $NAMESPACE rollout status --timeout=1m "deployment/policy-server-default"
-    assert_failure 1
+    # Policy Server startup should fail
+    apply_policy --no-wait policy-pod-privileged.yaml
+    run kubectl -n $NAMESPACE rollout status --timeout=1m "deployment/policy-server-default"
+    assert_failure 1
-    # Check logs of last policyserver pod
-    run -1 get_policy_server_status
-    assert_output -p 'Annotation not satisfied'
-    assert_output -p 'policy cannot be verified'
+    # Check logs of last policyserver pod
+    run -1 get_policy_server_status
+    assert_output -p 'Annotation not satisfied'
+    assert_output -p 'policy cannot be verified'
-    kubectl delete -f $RESOURCES_DIR/policy-pod-privileged.yaml
+    delete_policy policy-pod-privileged.yaml
}
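The verification config consumed by `create_configmap` can be inspected locally before it is wired into the policy server; this is just the scaffold command from the test plus a client-side dry run of the resulting configmap:

```bash
# Inspect the default verification config and the configmap it would produce.
kwctl scaffold verification-config > /tmp/verification-config.yaml
kubectl -n kubewarden create configmap ssc-verification-config \
    --from-file=verification-config=/tmp/verification-config.yaml \
    --dry-run=client -o yaml
```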
diff --git a/tests/upgrade.bats b/tests/upgrade.bats
index c90fa2c..345aaec 100644
--- a/tests/upgrade.bats
+++ b/tests/upgrade.bats
@@ -1,8 +1,15 @@
#!/usr/bin/env bats
setup() {
-    load common.bash
-    wait_pods -n kube-system
+    load ../helpers/helpers.sh
+    wait_pods -n kube-system
+}
+
+teardown_file() {
+    load ../helpers/helpers.sh
+    kubectl delete pods --all
+    kubectl delete admissionpolicies,clusteradmissionpolicies --all -A
+    helmer reset kubewarden-defaults
}
# helper function to allow run + pipe
@@ -12,14 +19,20 @@ function get_apiversion() {
# check_apiversion admissionpolicies v1
function check_apiversion {
-    run -0 get_apiversion $1
-    assert_output "policies.kubewarden.io/$2"
+    run -0 get_apiversion $1
+    assert_output "policies.kubewarden.io/$2"
}
-function check_default_policies {
-    # Check all policies are in v1
+@test "[CRD upgrade] Check default policies in protect mode" {
+    helmer set kubewarden-defaults \
+        --set recommendedPolicies.enabled=True \
+        --set recommendedPolicies.defaultPolicyMode=protect
+
+    # Wait for policies to be enforced
+    wait_policies PolicyUniquelyReachable
+
+    # Check all recommended policies are in v1
    check_apiversion clusteradmissionpolicies v1
-    wait_for_cluster_admission_policy PolicyUniquelyReachable
    # Run privileged pod (should fail)
    # kubefail_privileged run pod-privileged --image=registry.k8s.io/pause --privileged
@@ -27,38 +40,14 @@ function check_default_policies {
    run -1 kubectl run pod-privileged --image=registry.k8s.io/pause --privileged
}
-@test "[CRD upgrade] Install old Kubewarden" {
-    # Required by Kubewarden <= v1.16.0
-    helm repo add jetstack https://charts.jetstack.io --force-update
-    helm upgrade -i --wait cert-manager jetstack/cert-manager -n cert-manager --create-namespace --set crds.enabled=true
-
-    # Install old kubewarden version
-    KUBEWARDEN_CHARTS_LOCATION="$KUBEWARDEN_HELM_REPO_NAME" helm_in kubewarden-crds --version $KUBEWARDEN_CRDS_CHART_OLD_VERSION
-    KUBEWARDEN_CHARTS_LOCATION="$KUBEWARDEN_HELM_REPO_NAME" helm_in kubewarden-controller --version $KUBEWARDEN_CONTROLLER_CHART_OLD_VERSION
-    KUBEWARDEN_CHARTS_LOCATION="$KUBEWARDEN_HELM_REPO_NAME" helm_in kubewarden-defaults --version $KUBEWARDEN_DEFAULTS_CHART_OLD_VERSION \
-        --set recommendedPolicies.enabled=True \
-        --set recommendedPolicies.defaultPolicyMode=protect
-    check_default_policies
-}
-
-@test "[CRD upgrade] Upgrade Kubewarden" {
-    helm_up kubewarden-crds --version $KUBEWARDEN_CRDS_CHART_VERSION
-    helm_up kubewarden-controller --version $KUBEWARDEN_CONTROLLER_CHART_VERSION
-    helm_up kubewarden-defaults --version $KUBEWARDEN_DEFAULTS_CHART_VERSION
-    check_default_policies
-    # Not required by Kubewarden >= v1.17.0
-    # Cert-manager versions prior to v1.15.0 do not keep the CustomResourceDefinition on uninstall
-    helm uninstall cert-manager -n cert-manager
-}
-
@test "[CRD upgrade] Check old policy CRD version is translated to new" {
-    sed '/apiVersion:/ s#/v1.*#/v1alpha2#' $RESOURCES_DIR/policy-pod-privileged.yaml | apply_admission_policy
-    check_apiversion admissionPolicy v1
-    kubectl delete -f $RESOURCES_DIR/policy-pod-privileged.yaml
+    yq '.apiVersion = "policies.kubewarden.io/v1alpha2"' $RESOURCES_DIR/policies/policy-pod-privileged.yaml | apply_policy
+    check_apiversion admissionPolicy v1
+    delete_policy policy-pod-privileged.yaml
}
@test "[CRD upgrade] Disable default policies & run privileged pod" {
-    helm_up kubewarden-defaults --set recommendedPolicies.enabled=False
-    wait_rollout -n $NAMESPACE "deployment/policy-server-default"
-    kubectl run pod-privileged --image=registry.k8s.io/pause --privileged
+    helmer set kubewarden-defaults --set recommendedPolicies.enabled=False
+    kubectl run pod-privileged --image=registry.k8s.io/pause --privileged
+    kubectl delete pod pod-privileged
}
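`get_apiversion` itself sits outside the hunks above; based on the `run -0 get_apiversion $1` call and the expected `policies.kubewarden.io/<version>` output, it is presumably a small wrapper that lets bats `run` consume a pipeline, roughly along these lines (hypothetical):

```bash
# Hypothetical get_apiversion: print the apiVersion of the first policy object.
function get_apiversion() {
    kubectl get "$1" -o json | jq -r '.items[0].apiVersion'
}
```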