Skip to content

Commit

Permalink
Merge pull request #119 from kravciak/helmer
Browse files Browse the repository at this point in the history
Introduce helmer.sh helm manager for tests
  • Loading branch information
kravciak authored Oct 8, 2024
2 parents c031511 + 52631e2 commit 41fd7e6
Show file tree
Hide file tree
Showing 33 changed files with 787 additions and 607 deletions.
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1 +1,2 @@
resources/resources_*/
charts
128 changes: 42 additions & 86 deletions Makefile
Original file line number Diff line number Diff line change
@@ -1,106 +1,62 @@
.DEFAULT_GOAL := basic-end-to-end-tests.bats

mkfile_path := $(abspath $(lastword $(MAKEFILE_LIST)))
mkfile_dir := $(dir $(mkfile_path))
TESTS_DIR ?= $(mkfile_dir)tests
# directory with all the "template" files used to generate the files used during the tests
ROOT_RESOURCES_DIR ?= $(mkfile_dir)resources

# Kubewarden helm repository
KUBEWARDEN_HELM_REPO_NAME ?= kubewarden
# Override to use kubewarden charts from local directory
KUBEWARDEN_CHARTS_LOCATION ?= $(KUBEWARDEN_HELM_REPO_NAME)
NAMESPACE ?= kubewarden

export CLUSTER_NAME ?= kubewarden-testing
CLUSTER_CONTEXT ?= k3d-$(CLUSTER_NAME)

O := $(shell helm repo add $(KUBEWARDEN_HELM_REPO_NAME) https://charts.kubewarden.io --force-update)
O := $(shell helm repo update $(KUBEWARDEN_HELM_REPO_NAME))

# Parse current and previous helm versions for upgrade test:
# Current: last version from helm search kubewarden --devel
# Old: version that is older than the current version and also not an "-rc"
KW_VERSIONS := $(shell helm search repo --fail-on-no-result $(KUBEWARDEN_HELM_REPO_NAME)/ --versions --devel -o json | tr -d \' \
| jq -ec 'unique_by(.name) as $$c | { current:($$c | map({(.name): .version}) | add), old:map(select(.app_version != $$c[0].app_version and (.app_version | contains("rc") | not) )) | unique_by(.name)| map({(.name): .version}) | add}')
.DEFAULT_GOAL := all

KUBEWARDEN_CONTROLLER_CHART_VERSION := $(shell echo '$(KW_VERSIONS)' | jq -er '.current["kubewarden/kubewarden-controller"]' || echo "*")
KUBEWARDEN_CRDS_CHART_VERSION := $(shell echo '$(KW_VERSIONS)' | jq -er '.current["kubewarden/kubewarden-crds"]' || echo "*")
KUBEWARDEN_DEFAULTS_CHART_VERSION := $(shell echo '$(KW_VERSIONS)' | jq -er '.current["kubewarden/kubewarden-defaults"]' || echo "*")

KUBEWARDEN_CONTROLLER_CHART_OLD_VERSION := $(shell echo '$(KW_VERSIONS)' | jq -er '.old["kubewarden/kubewarden-controller"]' || echo "*")
KUBEWARDEN_CRDS_CHART_OLD_VERSION := $(shell echo '$(KW_VERSIONS)' | jq -er '.old["kubewarden/kubewarden-crds"]' || echo "*")
KUBEWARDEN_DEFAULTS_CHART_OLD_VERSION := $(shell echo '$(KW_VERSIONS)' | jq -er '.old["kubewarden/kubewarden-defaults"]' || echo "*")

# CRD version to be tested
CRD_VERSION ?= $(shell helm show values $(KUBEWARDEN_CHARTS_LOCATION)/kubewarden-defaults --version '$(KUBEWARDEN_DEFAULTS_CHART_VERSION)' | yq ".crdVersion")
CRD_VERSION_SUFFIX ?= $(shell echo $(CRD_VERSION) | cut -d'/' -f2)
# directory with all the files used during the tests. These files are copied from
# $(ROOT_RESOURCES_DIR) and changed to use the CRD version defined in $(CRD_VERSION)
RESOURCES_DIR ?= $(ROOT_RESOURCES_DIR)/resources_$(CRD_VERSION_SUFFIX)
MKFILE_DIR ?= $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
TESTS_DIR ?= $(MKFILE_DIR)tests
RESOURCES_DIR ?= $(MKFILE_DIR)resources

NAMESPACE ?= kubewarden
CLUSTER_CONTEXT ?= $(shell kubectl config current-context)

# ==================================================================================================
# Aliases
kube = kubectl --context $(CLUSTER_CONTEXT) $(1)
helm = helm --kube-context $(CLUSTER_CONTEXT) $(1)
bats = RESOURCES_DIR=$(RESOURCES_DIR) \
KUBEWARDEN_CRDS_CHART_OLD_VERSION=$(KUBEWARDEN_CRDS_CHART_OLD_VERSION) \
KUBEWARDEN_DEFAULTS_CHART_OLD_VERSION=$(KUBEWARDEN_DEFAULTS_CHART_OLD_VERSION) \
KUBEWARDEN_CONTROLLER_CHART_OLD_VERSION=$(KUBEWARDEN_CONTROLLER_CHART_OLD_VERSION) \
KUBEWARDEN_CRDS_CHART_VERSION=$(KUBEWARDEN_CRDS_CHART_VERSION) \
KUBEWARDEN_DEFAULTS_CHART_VERSION=$(KUBEWARDEN_DEFAULTS_CHART_VERSION) \
KUBEWARDEN_CONTROLLER_CHART_VERSION=$(KUBEWARDEN_CONTROLLER_CHART_VERSION) \
KUBEWARDEN_CHARTS_LOCATION=$(KUBEWARDEN_CHARTS_LOCATION) \
KUBEWARDEN_HELM_REPO_NAME=$(KUBEWARDEN_HELM_REPO_NAME) \
CLUSTER_CONTEXT=$(CLUSTER_CONTEXT) \
NAMESPACE=$(NAMESPACE) \
bats -T --print-output-on-failure $(1)
# Optional arguments for scripts

helm_in = $(helm) upgrade --install --wait --namespace $(NAMESPACE) --create-namespace
# cluster_k3d.sh:
# K3S=[1.30] - short|long version
# CLUSTER_NAME=[k3d-default]

# ==================================================================================================
# Macros
define install-kubewarden =
$(helm_in) kubewarden-crds $(KUBEWARDEN_CHARTS_LOCATION)/kubewarden-crds --version "$(KUBEWARDEN_CRDS_CHART_VERSION)"
$(helm_in) kubewarden-controller $(KUBEWARDEN_CHARTS_LOCATION)/kubewarden-controller --version "$(KUBEWARDEN_CONTROLLER_CHART_VERSION)"
$(helm_in) kubewarden-defaults $(KUBEWARDEN_CHARTS_LOCATION)/kubewarden-defaults --version "$(KUBEWARDEN_DEFAULTS_CHART_VERSION)"
$(kube) wait --for=condition=Ready --namespace $(NAMESPACE) pods --all
endef

define generate-versioned-resources-dir =
./scripts/generate_resources_dir.sh $(ROOT_RESOURCES_DIR) $(CRD_VERSION)
endef
# helmer.sh:
# VERSION=[next|prev|v1.17.0-rc2|local] (app version)
# REPO_NAME=[kubewarden]
# CHARTS_LOCATION=[./dirname|reponame]
# LATEST=[1]
# CRDS_ARGS, DEFAULTS_ARGS, CONTROLLER_ARGS

# ==================================================================================================
# Targets

# Destructive tests that reinstall kubewarden
# Test is responsible for used kubewarden version
upgrade::
$(generate-versioned-resources-dir)
$(call bats, $(TESTS_DIR)/upgrade.bats)
.PHONY: clean cluster install upgrade uninstall tests all

# Generate target for every test file
TESTS := $(notdir $(wildcard tests/*.bats))
$(TESTS)::
$(generate-versioned-resources-dir)
$(call bats, $(TESTS_DIR)/$@)
TESTFILES := $(notdir $(wildcard tests/*.bats))
$(TESTFILES):
@RESOURCES_DIR=$(RESOURCES_DIR) \
NAMESPACE=$(NAMESPACE) \
CLUSTER_CONTEXT=$(CLUSTER_CONTEXT) \
bats -T --print-output-on-failure $(TESTS_DIR)/$@

# Target all non-destructive tests
.PHONY: tests
tests: $(filter-out upgrade.bats audit-scanner-installation.bats, $(TESTS))

.PHONY: cluster install reinstall clean
tests: $(filter-out upgrade.bats audit-scanner-installation.bats, $(TESTFILES))

cluster:
k3d cluster create $(CLUSTER_NAME) -s 1 -a 1 --wait -v /dev/mapper:/dev/mapper
$(kube) wait --for=condition=Ready nodes --all
./scripts/cluster_k3d.sh create

install: check
./scripts/helmer.sh install

install:
$(install-kubewarden)
upgrade:
./scripts/helmer.sh upgrade
$(MAKE) upgrade.bats

uninstall:
./scripts/helmer.sh uninstall

clean:
k3d cluster delete $(CLUSTER_NAME)
./scripts/cluster_k3d.sh delete

all: clean cluster install tests

reinstall: clean cluster install
check:
@yq --version | grep mikefarah > /dev/null || { echo "yq is not the correct, needs mikefarah/yq!"; exit 1; }
@jq --version > /dev/null || { echo "jq is not installed!"; exit 1; }
@k3d --version > /dev/null || { echo "k3d is not installed!"; exit 1; }
@bats --version > /dev/null || { echo "bats is not installed!"; exit 1; }
91 changes: 91 additions & 0 deletions helpers/helpers.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,91 @@
#!/usr/bin/env bash

bats_require_minimum_version 1.7.0

load "../helpers/bats-support/load.bash"
load "../helpers/bats-assert/load.bash"
load "../helpers/kubelib.sh"

# ==================================================================================================
# Functions specific to kubewarden tests (require bats)

# Wrappers that pin kubectl/helm to the test cluster selected by $CLUSTER_CONTEXT.
# `command` bypasses the function itself, avoiding infinite recursion.
# --warnings-as-errors makes kubectl fail on server-side warnings.
kubectl() { command kubectl --context "$CLUSTER_CONTEXT" --warnings-as-errors "$@"; }
helm() { command helm --kube-context "$CLUSTER_CONTEXT" "$@"; }
# Shortcut to the helm manager script, resolved relative to the running bats test file
helmer() { $BATS_TEST_DIRNAME/../scripts/helmer.sh "$@"; }

# Export for retry function (subshell)
export -f kubectl helm

# ==================================================================================================

# trigger_audit_scan [jobname] - run a one-off audit scan and wait for it
# Creates a Job from the audit-scanner CronJob in $NAMESPACE (grep checks the
# "<jobname> created" confirmation), waits up to 3m for completion, then
# deletes the Job. Fails if creation, completion, or deletion fails.
trigger_audit_scan() {
local jobname=${1:-auditjob}
kubectl create job --from=cronjob/audit-scanner $jobname --namespace $NAMESPACE | grep "$jobname created"
kubectl wait --timeout=3m --for=condition="Complete" job $jobname --namespace $NAMESPACE
kubectl delete job $jobname --namespace $NAMESPACE
}

# Run & delete pod with optional parameters. Check exit code.
# kuberun [-0|-1|-N|!] "--privileged"
# Uses bats `run` (>= 1.7) exit-status matching: -0 expects success, -N expects
# exit code N, ! expects any failure. Remaining args go to `kubectl run`.
function kuberun {
# Default expectation: the pod command succeeds (exit 0)
local status=-0
# A first argument of the form `!` or -<digits> selects the expected status
[[ $1 =~ ^([!]|-[0-9]+)$ ]] && status="$1" && shift
# Timestamp makes the pod name unique; pod runs `true` and is removed via --rm
run "$status" kubectl run "pod-$(date +%s)" --image=busybox --restart=Never --rm -it --command "$@" -- true
}

# Run kubectl action which should fail on pod privileged policy
# Asserts exit code 1 and the admission-webhook denial message emitted by the
# privileged-pods policy (bats-assert regexp match on full output).
function kubefail_privileged {
run kubectl "$@"
assert_failure 1
assert_output --regexp '^Error.*: admission webhook.*denied the request.*container is not allowed$'
}

# Resolve a policy file argument: anything containing '/' is treated as an
# explicit path and returned verbatim; a bare filename is looked up under
# $RESOURCES_DIR/policies/.
policypath() {
    if [[ "$1" == */* ]]; then
        echo "$1"
    else
        echo "$RESOURCES_DIR/policies/$1"
    fi
}

# Deploy from pipe or resources dir (if parameter doesn't contain '/')
# Detect policy kind and wait for it to be active and uniquely reachable
# Works only with default policy server
# apply_policy [--no-wait] [policyfile]  (no file argument => read YAML from stdin)
function apply_policy {
[ "${1:-}" = '--no-wait' ] && local nowait=true && shift

# Handle policy yaml from pipe (-p /dev/stdin fails on github runner)
if [ $# -eq 0 ]; then
local tempfile=$(mktemp -p "$BATS_RUN_TMPDIR" policy-XXXXX.yaml)
cat > "$tempfile"
fi

# Resolve manifest (tempfile from pipe, otherwise resources dir) and apply it;
# the tempfile lives in $BATS_RUN_TMPDIR so bats cleans it up after the test
local pfile=${tempfile:-$(policypath "$1")}
local kind=$(yq '.kind' "$pfile")
kubectl apply -f "$pfile"

# Wait for the policy to be active and uniquely reachable
if [ ! -v nowait ]; then
wait_for --for=condition="PolicyActive" "$kind" --all -A
wait_policyserver default
wait_for --for=condition="PolicyUniquelyReachable" "$kind" --all -A
fi
}

# Delete a policy resource from its manifest file and wait for removal.
# $1 - policy file (resolved via policypath); remaining args go to kubectl delete
function delete_policy {
    local manifest
    manifest=$(policypath "$1")
    shift
    kubectl delete --wait -f "$manifest" "$@"
}

# wait_policies [condition] - at least one policy must exist
# Waits for all policy kinds (cluster + namespaced, incl. groups) to reach the
# given condition. With no argument it waits for both PolicyActive and
# PolicyUniquelyReachable, in that order.
function wait_policies {
    local condition
    # Intentionally unquoted so the two-word default expands into two iterations
    for condition in ${1:-PolicyActive PolicyUniquelyReachable}; do
        # BUGFIX: original waited on "$1" inside the loop — the loop variable was
        # ignored, and "$1" is unbound (set -u) when no argument is passed
        wait_for --for=condition="$condition" admissionpolicies,clusteradmissionpolicies,admissionpolicygroups,clusteradmissionpolicygroups --all -A
    done
}

# wait_policyserver [name] - wait for the policy-server deployment to roll out
# Defaults to the "default" policy server in $NAMESPACE.
function wait_policyserver {
local name="${1:-default}"
# Wait for specific revision to prevent changes during rollout
# NOTE(review): `revision` is not declared local — it leaks into the caller's scope
revision=$(kubectl -n $NAMESPACE get "deployment/policy-server-$name" -o json | jq -er '.metadata.annotations."deployment.kubernetes.io/revision"')
wait_rollout -n $NAMESPACE --revision $revision "deployment/policy-server-$name"
# Second wait covers a rollout started after the revision check — confirm it is still needed
wait_rollout -n $NAMESPACE "deployment/policy-server-$name"
}
64 changes: 64 additions & 0 deletions helpers/kubelib.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
#!/usr/bin/env bash
set -aeEuo pipefail

# ==================================================================================================
# General helpers

# log <SGR-code> <msg...> - print "HH:MM <msg>" wrapped in the given ANSI color code
log () { printf -- "$(date +%R) \e[${1}m${*:2}\e[0m\n"; }
# step - green log line; appends the calling script's basename as "[name]"
# NOTE(review): ${BASH_SOURCE[1]/${BASH_SOURCE}} strips the current file's path
# from the caller entry — confirm this is the intended way to get the test module
step () { log 32 "${*}" $(basename "${BASH_SOURCE[1]/${BASH_SOURCE}}" | sed 's/.\+/[&]/'); } # print test module
info () { log 0 " ${*}"; }
warn () { log 33 " ${*}"; }
error() { log 31 " ${*}"; }

# ==================================================================================================
# Kubernetes helpers

# Fail-fast wrappers: always pass -e so null/false/missing results exit non-zero
yq() { command yq -e "$@"; }
jq() { command jq -e "$@"; }

# Export for retry function (subshell)
export -f yq jq

# retry "cmd" [tries] [delay] [timeout]
# Re-run a command string (via `bash -c`) until it succeeds or attempts run out.
# $1 - command string (exported functions like kubectl/yq/jq are visible in it)
# $2 - max attempts (default 15)
# $3 - seconds to sleep between attempts (default 20)
# $4 - per-attempt timeout in seconds (default 25; previously hard-coded)
# Returns non-zero after the final failed attempt ("Godot: ..." message).
function retry() {
    local cmd=$1
    local tries=${2:-15}
    local delay=${3:-20}
    local tmout=${4:-25}
    local i

    for ((i=1; i<=tries; i++)); do
        # Kill a hung attempt after $tmout seconds; log each failed attempt
        timeout $tmout bash -c "$cmd" && break || echo "RETRY #$i: $cmd"
        # On the last attempt: print the give-up message and fail the function
        [ $i -ne $tries ] && sleep $delay || { echo "Godot: $cmd"; false; }
    done
}

# Safe version of waiting for pods. Looks in kubewarden ns by default
# Handles kube-api disconnects during upgrade
# wait_pods [kubectl get pods args...]  (default: -n kubewarden)
function wait_pods() {
local i output
for i in {1..20}; do
# 'Fail' placeholder keeps looping when kubectl itself errors (api down)
output=$(kubectl get pods --no-headers -o wide ${@:--n kubewarden} | grep -vw Completed || echo 'Fail')
# Done when every non-Completed pod line matches "X/X Running"
grep -vE '([0-9]+)/\1 +Running' <<< $output || break
[ $i -ne 20 ] && sleep 30 || { echo "Godot: pods not running"; false; }
done
}

# Safe version of waiting for nodes
# Handles kube-api disconnects during upgrade
# wait_nodes [kubectl get nodes args...]
function wait_nodes() {
local i output
for i in {1..20}; do
# 'Fail' placeholder keeps looping when kubectl itself errors (api down)
output=$(kubectl get nodes --no-headers ${@:-} || echo 'Fail')
# Done when every node line contains the word "Ready"
grep -vE '\bReady\b' <<< $output || break
[ $i -ne 20 ] && sleep 30 || { echo "Godot: nodes not running"; false; }
done
}

# Thin kubectl wrappers with a uniform 5-minute timeout
function wait_for () { kubectl wait --timeout=5m "$@"; }
function wait_rollout() { kubectl rollout status --timeout=5m "$@"; }

# Wait for cluster to come up after reboot
# Retries cluster-info (20 tries, 30s apart), then waits for nodes and pods
function wait_cluster() {
retry "kubectl cluster-info" 20 30
wait_nodes
wait_pods
}
File renamed without changes.
File renamed without changes.
File renamed without changes.
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
---
apiVersion: policies.kubewarden.io/v1alpha2
apiVersion: policies.kubewarden.io/v1
kind: ClusterAdmissionPolicy
metadata:
name: psp-user-group-disabled
Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
---
apiVersion: policies.kubewarden.io/v1alpha2
apiVersion: policies.kubewarden.io/v1
kind: ClusterAdmissionPolicy
metadata:
name: psp-user-group-enabled
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
apiVersion: policies.kubewarden.io/v1alpha2
apiVersion: policies.kubewarden.io/v1
kind: AdmissionPolicy
metadata:
name: pod-privileged
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
apiVersion: policies.kubewarden.io/v1alpha2
apiVersion: policies.kubewarden.io/v1
kind: ClusterAdmissionPolicy
metadata:
name: privileged-pods
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
apiVersion: policies.kubewarden.io/v1alpha2
apiVersion: policies.kubewarden.io/v1
kind: ClusterAdmissionPolicy
metadata:
name: privileged-pods
Expand Down
File renamed without changes.
File renamed without changes.
File renamed without changes.
40 changes: 40 additions & 0 deletions scripts/cluster_k3d.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
#!/usr/bin/env bash
set -aeEuo pipefail
# trap 'echo "Error on ${BASH_SOURCE/$PWD/.}:${LINENO} $(sed -n "${LINENO} s/^\s*//p" $PWD/${BASH_SOURCE/$PWD})"' ERR

# Shared helpers (wait_pods, logging, ...)
. "$(dirname "$0")/../helpers/kubelib.sh"

# Optional variables
# K3S defaults to the k3s version bundled with the installed k3d
K3S=${K3S:-$(k3d version -o json | jq -r '.k3s')}
CLUSTER_NAME=${CLUSTER_NAME:-k3s-default}
MASTER_COUNT=${MASTER_COUNT:-1}
WORKER_COUNT=${WORKER_COUNT:-1}

# Complete partial K3S version from dockerhub
# e.g. K3S=1.30 resolves to the newest matching v1.30.x-k3sN image tag
if [[ ! $K3S =~ ^v[0-9.]+-k3s[0-9]$ ]]; then
K3S=$(curl -L -s "https://registry.hub.docker.com/v2/repositories/rancher/k3s/tags?page_size=20&name=$K3S" | jq -re 'first(.results[].name | select(test("^v[0-9.]+-k3s[0-9]$")))')
echo "K3S version: $K3S"
fi

# Create new cluster (with a local registry), then wait for system pods
if [ "${1:-}" == 'create' ]; then
# /dev/mapper: https://k3d.io/v5.7.4/faq/faq/#issues-with-btrfs
k3d cluster create $CLUSTER_NAME --wait \
--image rancher/k3s:$K3S \
-s $MASTER_COUNT -a $WORKER_COUNT \
--registry-create k3d-$CLUSTER_NAME-registry \
-v /dev/mapper:/dev/mapper
wait_pods -n kube-system
fi

# Delete existing cluster
if [ "${1:-}" == 'delete' ]; then
k3d cluster delete $CLUSTER_NAME
fi

# Return 0 if cluster exists otherwise non 0
if [ "${1:-}" == 'status' ]; then
k3d cluster list $CLUSTER_NAME &>/dev/null
fi

# No-op so the script exits 0 when no (or an unknown) action was given
:
Loading

0 comments on commit 41fd7e6

Please sign in to comment.