diff --git a/.fmf/version b/.fmf/version
new file mode 100644
index 0000000..d00491f
--- /dev/null
+++ b/.fmf/version
@@ -0,0 +1 @@
+1
diff --git a/.github/workflows/verify.yaml b/.github/workflows/verify.yaml
deleted file mode 100644
index e4b769e..0000000
--- a/.github/workflows/verify.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-name: Verify
-
-on:
-  pull_request:
-    branches: [ main ]
-
-jobs:
-  verify:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v4
-
-      - uses: actions/setup-java@v4
-        with:
-          distribution: 'temurin'
-          java-version: '17'
-
-      - name: Cache m2 repo
-        uses: actions/cache@v3
-        with:
-          path: ~/.m2/repository
-          key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
-          restore-keys: |
-            ${{ runner.os }}-maven-
-
-      - name: Create k8s Kind Cluster
-        uses: helm/kind-action@v1
-
-      - name: Install operator-sdk cli
-        run: |
-          export ARCH=$(case $(uname -m) in x86_64) echo -n amd64 ;; aarch64) echo -n arm64 ;; *) echo -n $(uname -m) ;; esac)
-          export OS=$(uname | awk '{print tolower($0)}')
-          export OPERATOR_SDK_DL_URL=https://github.com/operator-framework/operator-sdk/releases/download/v1.36.0
-          curl -LO ${OPERATOR_SDK_DL_URL}/operator-sdk_${OS}_${ARCH}
-          chmod +x operator-sdk_${OS}_${ARCH} && sudo mv operator-sdk_${OS}_${ARCH} /usr/local/bin/operator-sdk
-
-      - name: Install olm to kind
-        run: operator-sdk olm install
-
-      - name: Get yaml install files
-        run: ./mvnw install -P get-operator-files
-
-      - name: Verify
-        run: ./mvnw verify -P test -Dgroups=dummy
diff --git a/.packit.yaml b/.packit.yaml
new file mode 100644
index 0000000..cb055d3
--- /dev/null
+++ b/.packit.yaml
@@ -0,0 +1,27 @@
+# The default packit instance is prod and it is the only one used.
+# The stg instance is present for testing new packit features in forked repositories where stg is installed.
+packit_instances: ["prod", "stg"]
+upstream_project_url: https://github.com/skodjob/streams-e2e
+issue_repository: https://github.com/skodjob/streams-e2e
+jobs:
+  - job: tests
+    trigger: pull_request
+    # Suffix for the job name
+    identifier: "verify"
+    targets:
+      # This target is not used at all by our tests, but it has to be one of the available ones - https://packit.dev/docs/configuration/#aliases
+      - centos-stream-9-x86_64
+      # ARM is commented out since the apicurio-operator upstream is not supported on ARM
+      # - centos-stream-9-aarch64
+    # We don't need to build any packages for Fedora/RHEL/CentOS, it is not related to the streams-e2e tests
+    skip_build: true
+    manual_trigger: true
+    env: { IP_FAMILY: ipv4 }
+    labels:
+      - verify
+      - dummy
+    tf_extra_params:
+      test:
+        tmt:
+          name: verify
+###############################################################################################
diff --git a/tmt/README.md b/tmt/README.md
new file mode 100644
index 0000000..e11707f
--- /dev/null
+++ b/tmt/README.md
@@ -0,0 +1,73 @@
+# Testing Farm
+
+This document gives a detailed breakdown of the testing process using the Testing Farm service.
+
+## Pre-requisites
+
+* Python >=3.9
+* TMT command line tool (optional) - for linting and checking tmt-formatted test plans and tests
+  * `pip install tmt[all]`
+* Testing Farm command line tool - for triggering your test plans in Testing Farm
+  * `pip install tft-cli`
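+
+With `tmt` installed, the metadata in this repository can be checked locally. A minimal
+sketch (run from the repository root, where the `.fmf/version` file lives):
+
+```commandline
+tmt lint
+tmt plans ls
+tmt tests ls
+```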
+
+## Links
+
+* [Test Management Tool (tmt)](https://tmt.readthedocs.io/en/latest/index.html)
+* [Testing Farm](https://docs.testing-farm.io/general/0.1/index.html)
+
+## Current plans and tests
+
+Plans are stored in the [plans](./plans) folder; the file `main.fmf` contains the test plan definition.
+This definition is composed of hardware requirements, prepare steps for the provisioned VM executor, and
+the specific plans. Each specific plan defines selectors for the [tests](./tests) that should be executed.
+
+### List of plans
+* verify
+
+## Usage
+
+### Pre-requisites
+1. Get an API token for Testing Farm [(how-to obtain token)](https://docs.testing-farm.io/general/0.1/onboarding.html)
+2. Store the token in an env var: ```export TESTING_FARM_API_TOKEN="your_token"```
+
+### Run tests
+
+Run all plans
+```commandline
+testing-farm request --compose Fedora-38 --git-url https://github.com/skodjob/streams-e2e.git
+```
+
+Select a specific plan and git branch
+```commandline
+testing-farm request --compose Fedora-38 \
+  --git-url https://github.com/skodjob/streams-e2e.git \
+  --git-ref some-branch \
+  --plan verify
+```
+
+Run on multiple architectures
+```commandline
+testing-farm request --compose Fedora-Rawhide \
+  --git-url https://github.com/skodjob/streams-e2e.git \
+  --git-ref some-branch \
+  --plan verify \
+  --arch aarch64,x86_64
+```
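+
+Environment variables consumed by the suite (such as `IP_FAMILY`, used by the kind setup
+script) can be passed along with the request; a sketch, assuming your `tft-cli` version
+supports the `--environment` option:
+
+```commandline
+testing-farm request --compose Fedora-38 \
+  --git-url https://github.com/skodjob/streams-e2e.git \
+  --plan verify \
+  --environment IP_FAMILY=ipv4
+```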
+
+## Packit-as-a-service for PR check
+
+[Packit-as-a-service](https://github.com/marketplace/packit-as-a-service) is a GitHub application
+for running testing-farm jobs from a PR, requested by a comment command. The definition of the jobs is
+stored in [.packit.yaml](../.packit.yaml). Packit can be triggered from the PR by a comment, but only
+members of the skodjob organization are able to run tests.
+
+### Usage
+
+Run all jobs for the PR
+```
+/packit test
+```
+
+Run selected jobs by label
+```
+/packit test --labels verify
+```
diff --git a/tmt/plans/main.fmf b/tmt/plans/main.fmf
new file mode 100644
index 0000000..51c63cd
--- /dev/null
+++ b/tmt/plans/main.fmf
@@ -0,0 +1,120 @@
+# TMT test plan definition
+# https://tmt.readthedocs.io/en/latest/overview.html
+
+# Baseline common for all test plans
+#######################################################################
+summary: Streams-e2e test suite
+discover:
+  how: fmf
+
+# Required HW
+provision:
+  hardware:
+    memory: ">= 16 GiB"
+    cpu:
+      processors: ">= 4"
+
+# Install required packages and scripts for running the streams-e2e suite
+prepare:
+  - name: Clean cache
+    how: shell
+    script: |
+      OS=$(cat /etc/redhat-release || true)
+      if [[ ${OS} == *"CentOS"* ]]; then
+        sudo yum -y clean dbcache
+      else
+        sudo dnf -y clean dbcache
+      fi
+
+  - name: Install packages
+    how: install
+    package:
+      - wget
+      - java-17-openjdk-devel
+      - xz
+      - make
+      - git
+      - zip
+      - coreutils
+
+  - name: Install docker-ce
+    how: shell
+    script: |
+      OS=$(cat /etc/redhat-release || true)
+      if [[ ${OS} == *"CentOS"* ]]; then
+        sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
+      else
+        sudo dnf config-manager --add-repo https://download.docker.com/linux/fedora/docker-ce.repo
+      fi
+
+      sudo yum install -y docker-ce
+      sudo systemctl enable docker --now
+
+  - name: Install yq
+    how: shell
+    script: |
+      ARCH=$(uname -m)
+      if [[ $ARCH == "x86_64" ]]; then ARCH="amd64"; fi
+      if [[ $ARCH == "aarch64" ]]; then ARCH="arm64"; fi
+      ./tmt/scripts/install_yq.sh ${ARCH}
+
+  - name: Install oc and kubectl clients
+    how: shell
+    script: |
+      PLATFORM=$(uname -m)
+      URL="https://mirror.openshift.com/pub/openshift-v4/${PLATFORM}/clients/ocp/stable/openshift-client-linux.tar.gz"
+      mkdir -p /tmp/openshift
+      wget ${URL} -O openshift.tar.gz -q
+      tar xzf openshift.tar.gz -C /tmp/openshift
+      sudo cp /tmp/openshift/oc /usr/bin/oc
+      sudo cp /tmp/openshift/kubectl /usr/bin/kubectl
+      sudo rm -rf /tmp/openshift/
+      sudo rm -rf openshift.tar.gz
+
+  - name: Install mvn
+    how: shell
+    script: |
+      mkdir -p /usr/share/maven /usr/share/maven/ref
+      curl -fsSL -o /tmp/apache-maven.tar.gz https://apache.osuosl.org/maven/maven-3/3.9.8/binaries/apache-maven-3.9.8-bin.tar.gz
+      tar -xzf /tmp/apache-maven.tar.gz -C /usr/share/maven --strip-components=1
+      rm -f /tmp/apache-maven.tar.gz
+      ln -s /usr/share/maven/bin/mvn /usr/bin/mvn
+
+  - name: Install kind
+    how: shell
+    script: |
+      ARCH=$(uname -m)
+      if [[ $ARCH == "x86_64" ]]; then ARCH="amd64"; fi
+      if [[ $ARCH == "aarch64" ]]; then ARCH="arm64"; fi
+      ./tmt/scripts/setup-kind.sh ${ARCH}
+
+  - name: Install helm
+    how: shell
+    script: |
+      ./tmt/scripts/setup-helm.sh
+
+  - name: Install operator-sdk and olm
+    how: shell
+    script: |
+      export ARCH=$(case $(uname -m) in x86_64) echo -n amd64 ;; aarch64) echo -n arm64 ;; *) echo -n $(uname -m) ;; esac)
+      export OS=$(uname | awk '{print tolower($0)}')
+      export OPERATOR_SDK_DL_URL=https://github.com/operator-framework/operator-sdk/releases/download/v1.36.0
+      curl -LO ${OPERATOR_SDK_DL_URL}/operator-sdk_${OS}_${ARCH}
+      chmod +x operator-sdk_${OS}_${ARCH} && sudo mv operator-sdk_${OS}_${ARCH} /usr/local/bin/operator-sdk
+      operator-sdk olm install
+
+# Discover tmt-defined tests in the tests/ folder
+execute:
+  how: tmt
+
+# Post-run step to copy logs
+finish:
+  how: shell
+  script: ./tmt/scripts/copy-logs.sh
+#######################################################################
+
+/verify:
+  summary: Run dummy streams-e2e test suite
+  discover+:
+    test:
+      - verify
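+
+# Illustrative only: another specific plan could be added alongside /verify by merging
+# additional test selectors into the baseline (the plan and test name "smoke" below are
+# hypothetical):
+#
+# /smoke:
+#   summary: Run smoke streams-e2e test suite
+#   discover+:
+#     test:
+#       - smoke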
diff --git a/tmt/scripts/copy-logs.sh b/tmt/scripts/copy-logs.sh
new file mode 100755
index 0000000..f2fdbb0
--- /dev/null
+++ b/tmt/scripts/copy-logs.sh
@@ -0,0 +1,15 @@
+#!/bin/sh -eux
+
+
+TEST_LOG_DIR="${TMT_PLAN_DATA}/../discover/default-0/tests/target/logs"
+XUNIT_LOG_DIR="${TMT_PLAN_DATA}/../discover/default-0/tests/target/failsafe-reports"
+
+TARGET_DIR="${TMT_PLAN_DATA}"
+LOGS_DIR="${TARGET_DIR}/logs"
+XUNIT_DIR="${TARGET_DIR}/xunit"
+
+mkdir -p "${LOGS_DIR}"
+mkdir -p "${XUNIT_DIR}"
+
+cp -R "${TEST_LOG_DIR}" "${LOGS_DIR}" || true
+cp -R "${XUNIT_LOG_DIR}" "${XUNIT_DIR}" || true
diff --git a/tmt/scripts/install_yq.sh b/tmt/scripts/install_yq.sh
new file mode 100755
index 0000000..4e51840
--- /dev/null
+++ b/tmt/scripts/install_yq.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+
+ARCH=$1
+if [ -z "$ARCH" ]; then
+    ARCH="amd64"
+fi
+
+curl -L https://github.com/mikefarah/yq/releases/download/v4.44.3/yq_linux_${ARCH} > yq && chmod +x yq
+sudo mv yq /usr/bin/
diff --git a/tmt/scripts/setup-helm.sh b/tmt/scripts/setup-helm.sh
new file mode 100755
index 0000000..9fc92c1
--- /dev/null
+++ b/tmt/scripts/setup-helm.sh
@@ -0,0 +1,44 @@
+#!/usr/bin/env bash
+set -x
+
+TEST_HELM3_VERSION=${TEST_HELM3_VERSION:-'v3.15.1'}
+TEST_HELM_UNITTEST_VERSION=${TEST_HELM_UNITTEST_VERSION:-'v0.5.1'}
+
+function install_helm3 {
+    export HELM_INSTALL_DIR=/usr/bin
+    curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get > get_helm.sh
+    # we need to modify the script with a different path because on the Azure pipelines the HELM_INSTALL_DIR env var is not honoured
+    sed -i 's#/usr/local/bin#/usr/bin#g' get_helm.sh
+    chmod 700 get_helm.sh
+
+    echo "Installing helm 3..."
+    sudo ./get_helm.sh --version "${TEST_HELM3_VERSION}"
+
+    echo "Verifying the installation of the helm binary..."
+    # run a proper helm command instead of, for example, "which helm", to verify that we can call the binary
+    helm --help
+    helmCommandOutput=$?
+
+    if [ $helmCommandOutput != 0 ]; then
+        echo "helm binary hasn't been installed properly - exiting..."
+        exit 1
+    fi
+}
+
+function install_helm_unittest {
+    echo "Installing helm unittest plugin ..."
+    helm plugin install --version $TEST_HELM_UNITTEST_VERSION https://github.com/helm-unittest/helm-unittest.git
+
+    echo "Verifying the installation of the helm unittest plugin ..."
+    # run a proper helm command to verify that we can call the plugin
+    helm unittest --help
+    helmCommandOutput=$?
+
+    if [ $helmCommandOutput != 0 ]; then
+        echo "helm unittest plugin hasn't been installed properly - exiting..."
+        exit 1
+    fi
+}
+
+install_helm3
+install_helm_unittest
diff --git a/tmt/scripts/setup-kind.sh b/tmt/scripts/setup-kind.sh
new file mode 100755
index 0000000..31ec9ef
--- /dev/null
+++ b/tmt/scripts/setup-kind.sh
@@ -0,0 +1,260 @@
+#!/usr/bin/env bash
+set -xe
+
+rm -rf ~/.kube
+
+KUBE_VERSION=${KUBE_VERSION:-1.23.0}
+COPY_DOCKER_LOGIN=${COPY_DOCKER_LOGIN:-"false"}
+
+DEFAULT_CLUSTER_MEMORY=$(free -m | grep "Mem" | awk '{print $2}')
+DEFAULT_CLUSTER_CPU=$(awk '$1~/cpu[0-9]/{usage=($2+$4)*100/($2+$4+$5); print $1": "usage"%"}' /proc/stat | wc -l)
+
+CLUSTER_MEMORY=${CLUSTER_MEMORY:-$DEFAULT_CLUSTER_MEMORY}
+CLUSTER_CPU=${CLUSTER_CPU:-$DEFAULT_CLUSTER_CPU}
+
+echo "[INFO] CLUSTER_MEMORY: ${CLUSTER_MEMORY}"
+echo "[INFO] CLUSTER_CPU: ${CLUSTER_CPU}"
+
+# Note that IPv6 is only supported on kind (i.e., minikube does not support it). We also assume
+# that when you set IP_FAMILY to ipv6 you meet the requirements: (i) net.ipv6.conf.all.disable_ipv6 = 0,
+# (ii) you have installed a CNI supporting IPv6.
+IP_FAMILY=${IP_FAMILY:-"ipv4"}
+
+ARCH=$1
+if [ -z "$ARCH" ]; then
+    ARCH="amd64"
+fi
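+
+# For example, an IPv6 run of this script on an amd64 host would be invoked as:
+#   IP_FAMILY=ipv6 ./tmt/scripts/setup-kind.sh amd64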
+
+function install_kubectl {
+    if [ "${TEST_KUBECTL_VERSION:-latest}" = "latest" ]; then
+        TEST_KUBECTL_VERSION=$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)
+    fi
+    curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/${TEST_KUBECTL_VERSION}/bin/linux/${ARCH}/kubectl && chmod +x kubectl
+    sudo cp kubectl /usr/local/bin
+}
+
+function label_node {
+    # It should work for all clusters
+    for nodeName in $(kubectl get nodes -o custom-columns=:.metadata.name --no-headers);
+    do
+        echo ${nodeName};
+        kubectl label node ${nodeName} rack-key=zone;
+    done
+}
+
+function install_kubernetes_provisioner {
+
+    if [ "${TEST_KUBERNETES_VERSION:-latest}" = "latest" ]; then
+        # get the latest released tag
+        TEST_KUBERNETES_VERSION=$(curl https://api.github.com/repos/kubernetes-sigs/kind/releases/latest | grep -Po "(?<=\"tag_name\": \").*(?=\")")
+    fi
+    TEST_KUBERNETES_URL=https://github.com/kubernetes-sigs/kind/releases/download/${TEST_KUBERNETES_VERSION}/kind-linux-${ARCH}
+
+    if [ "$KUBE_VERSION" != "latest" ] && [ "$KUBE_VERSION" != "stable" ]; then
+        KUBE_VERSION="v${KUBE_VERSION}"
+    fi
+
+    curl -Lo kind ${TEST_KUBERNETES_URL} && chmod +x kind
+    sudo cp kind /usr/local/bin
+}
+
+function create_cluster_role_binding_admin {
+    kubectl create clusterrolebinding add-on-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:default
+}
+
+: '
+@brief: Set up the Kubernetes configuration directory and file.
+@note: Ensures the $HOME/.kube directory and $HOME/.kube/config file exist.
+'
+function setup_kube_directory {
+    mkdir -p $HOME/.kube
+    touch $HOME/.kube/config
+}
+
+: '
+@brief: Add Docker Hub credentials to a Kubernetes node.
+@param $1: Container name/ID.
+@global: COPY_DOCKER_LOGIN - If "true", copies credentials.
+@note: Uses the host'"'"'s $HOME/.docker/config.json.
+'
+function add_docker_hub_credentials_to_kubernetes {
+    # Add Docker Hub credentials to the kind node
+    if [ "$COPY_DOCKER_LOGIN" = "true" ]
+    then
+        set +ex
+
+        docker exec $1 bash -c "echo '$(cat $HOME/.docker/config.json)'| sudo tee -a /var/lib/kubelet/config.json > /dev/null && sudo systemctl restart kubelet"
+
+        set -ex
+    fi
+}
+
+: '
+@brief: Update the Docker daemon configuration and restart the service.
+@param $1: JSON string for the Docker daemon configuration.
+@note: Requires sudo permissions.
+'
+function updateDockerDaemonConfiguration() {
+    # We need to add such a host to insecure-registries (as localhost is the default)
+    echo $1 | sudo tee /etc/docker/daemon.json
+    # we need to restart the docker service to propagate the configuration
+    sudo systemctl restart docker
+}
+
+: '
+@brief: Increases the inotify user watches and user instances limits on a Linux system.
+@param: None.
+@global: None.
+@note: Inotify is a Linux subsystem used for file system event notifications. This function
+       helps adjust the limits for applications or services that monitor a large number
+       of files or directories.
+       This is specifically needed for a multi-node control plane cluster
+       https://github.com/kubernetes-sigs/kind/issues/2744#issuecomment-1127808069
+'
+function adjust_inotify_limits {
+    # Increase the inotify user watches limit
+    echo "Setting fs.inotify.max_user_watches to 655360..."
+    echo fs.inotify.max_user_watches=655360 | sudo tee -a /etc/sysctl.conf
+
+    # Increase the inotify user instances limit
+    echo "Setting fs.inotify.max_user_instances to 1280..."
+    echo fs.inotify.max_user_instances=1280 | sudo tee -a /etc/sysctl.conf
+
+    # Reload the system configuration settings
+    echo "Reloading sysctl settings..."
+    sudo sysctl -p
+
+    echo "Inotify limits adjusted successfully."
+}
+
+setup_kube_directory
+install_kubectl
+install_kubernetes_provisioner
+adjust_inotify_limits
+
+reg_name='kind-registry'
+reg_port='5001'
+
+if [[ "$IP_FAMILY" = "ipv4" || "$IP_FAMILY" = "dual" ]]; then
+    hostname=$(hostname --ip-address | grep -oE '\b([0-9]{1,3}\.){3}[0-9]{1,3}\b' | awk '$1 != "127.0.0.1" { print $1 }' | head -1)
+
+    # update insecure registries
+    updateDockerDaemonConfiguration "{ \"insecure-registries\" : [\"${hostname}:${reg_port}\"] }"
+
+    # Create kind cluster with containerd registry config dir enabled
+    # TODO: kind will eventually enable this by default and this patch will
+    # be unnecessary.
+    #
+    # See:
+    # https://github.com/kubernetes-sigs/kind/issues/2875
+    # https://github.com/containerd/containerd/blob/main/docs/cri/config.md#registry-configuration
+    # See: https://github.com/containerd/containerd/blob/main/docs/hosts.md
+    cat <<EOF | kind create cluster --image "kindest/node:${KUBE_VERSION}" --name kind-cluster --config=-
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+containerdConfigPatches:
+- |-
+  [plugins."io.containerd.grpc.v1.cri".registry]
+    config_path = "/etc/containerd/certs.d"
+networking:
+  ipFamily: ${IP_FAMILY}
+EOF
+
+    # start a local Docker registry (unless it is already running)
+    if [ "$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)" != 'true' ]; then
+        docker run \
+          -d --restart=always -p "${hostname}:${reg_port}:5000" --name "${reg_name}" \
+          registry:2
+    fi
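+
+    # For illustration only: with the registry up, an image would be pushed to it as, e.g.
+    #   docker tag my-image:latest ${hostname}:${reg_port}/my-image:latest
+    #   docker push ${hostname}:${reg_port}/my-image:latest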
+
+    # Add the registry config to the nodes
+    #
+    # This is necessary because localhost resolves to loopback addresses that are
+    # network-namespace local.
+    # In other words: localhost in the container is not localhost on the host.
+    #
+    # We want a consistent name that works from both ends, so we tell containerd to
+    # alias localhost:${reg_port} to the registry container when pulling images
+    # note: `kind get nodes` assumes the default cluster name `kind`; as we created the cluster with a custom name we have to use --name
+    # See https://kind.sigs.k8s.io/docs/user/local-registry/
+    REGISTRY_DIR="/etc/containerd/certs.d/${hostname}:${reg_port}"
+
+    for node in $(kind get nodes --name kind-cluster); do
+        echo "Executing command in node:${node}"
+        docker exec "${node}" mkdir -p "${REGISTRY_DIR}"
+        cat <<EOF | docker exec -i "${node}" cp /dev/stdin "${REGISTRY_DIR}/hosts.toml"
+[host."http://${reg_name}:5000"]
+EOF
+    done
+elif [[ "$IP_FAMILY" = "ipv6" ]]; then
+    # IPv6-specific values: a fixed ULA prefix and a DNS name for the local registry
+    ula_fixed_ipv6="fd01:2345:6789"
+    registry_dns="myregistry.local"
+
+    # update insecure registries
+    updateDockerDaemonConfiguration "{ \"insecure-registries\" : [\"[${ula_fixed_ipv6}::1]:${reg_port}\", \"${registry_dns}:${reg_port}\"] }"
+
+    cat <<EOF | kind create cluster --image "kindest/node:${KUBE_VERSION}" --name kind-cluster --config=-
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+containerdConfigPatches:
+- |-
+  [plugins."io.containerd.grpc.v1.cri".registry]
+    config_path = "/etc/containerd/certs.d"
+networking:
+  ipFamily: ${IP_FAMILY}
+EOF
+
+    if [ "$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)" != 'true' ]; then
+        docker run \
+          -d --restart=always -p "[${ula_fixed_ipv6}::1]:${reg_port}:5000" --name "${reg_name}" \
+          registry:2
+    fi
+    # we also need to make a DNS record for docker tag, because it seems that this version does not support the [::]: format
+    echo "${ula_fixed_ipv6}::1 ${registry_dns}" >> /etc/hosts
+
+    # note: `kind get nodes` assumes the default cluster name `kind`; as we created the cluster with a custom name we have to use --name
+    # See https://kind.sigs.k8s.io/docs/user/local-registry/
+    for node in $(kind get nodes --name kind-cluster); do
+        echo "Executing command in node:${node}"
+        cat <<EOF | docker exec -i "${node}" bash -c "cat >> /etc/hosts"
+${ula_fixed_ipv6}::1 ${registry_dns}
+EOF
+    done
+fi
+
+create_cluster_role_binding_admin
+label_node