Enable OOB tests #2187

Open · wants to merge 13 commits into base: improvement/ZENKO-4414
5 changes: 5 additions & 0 deletions .github/actions/deploy/action.yaml
@@ -83,6 +83,11 @@ runs:
shell: bash
run: sh tests/smoke/deploy-sorbet-resources.sh end2end
working-directory: ./.github/scripts/end2end/operator
- name: Deploy metadata
shell: bash
run: bash deploy-metadata.sh
working-directory: ./.github/scripts/end2end
if: ${{ env.ENABLE_RING_TESTS == 'true' }}
- name: End-to-end configuration
shell: bash
run: bash configure-e2e.sh "end2end" ${E2E_IMAGE_NAME}:${E2E_IMAGE_TAG} "default"
90 changes: 90 additions & 0 deletions .github/scripts/end2end/common.sh
@@ -9,3 +9,93 @@ get_token() {
jq -cr '.id_token'
}

wait_for_endpoint() {
local host=$1
local port=$2
local timeout_s=$3

kubectl run wait-for-port \
--image=busybox \
--attach=True \
--rm \
--restart=Never \
--pod-running-timeout=5m \
--image-pull-policy=IfNotPresent \
--env="HOST=${host}" \
--env="PORT=${port}" \
--env="TIMEOUT_S=${timeout_s}" \
-- sh -c '
wait_for_endpoint() {
local count=0
echo "waiting for $HOST:$PORT to be available"
while ! nc -z -w 1 $HOST "$PORT"; do
count=$((count + 1))
[ "$count" -ge "$TIMEOUT_S" ] && echo "Error: timedout waiting for $HOST:$PORT after $TIMEOUT_S seconds" && return 1
sleep 1
done
echo "$HOST:$PORT is now available."
}
wait_for_endpoint
'
}

wait_for_all_pods_behind_services() {
local service=$1
local namespace=$2
local port_regex=$3
local timeout_s=$4
kubectl get pods -n $namespace -l app=$service -o jsonpath='{range .items[*]}{.metadata.deletionTimestamp}:{.status.podIP}:{.spec.containers[*].ports[*].containerPort}{"\n"}{end}' | while read -r output; do
deletion_timestamp=$(echo $output | cut -d':' -f1)
ip=$(echo $output | cut -d':' -f2)
ports=$(echo $output | cut -d':' -f3)
# skip pods that are terminating
if [ -n "$deletion_timestamp" ] || [ -z "$ip" ] || [ -z "$ports" ]; then
continue
fi
# wait for all ports that match the port prefix, in case multiple
# containers are running within the same pod
for port in $ports; do
if [[ $port == $port_regex ]]; then
wait_for_endpoint $ip $port $timeout_s
fi
done
done
}

# wait for the consumer group to be in a stable state (no rebalance in progress and at least one consumer connected)
wait_for_consumer_group() {
namespace=$1
# Getting the name of the first kafka pod
kafka_pod=$(kubectl get pods -n $namespace -l brokerId=0,kafka_cr=end2end-base-queue,app=kafka -o jsonpath='{.items[0].metadata.name}')
consumer_group=$2
# When a pod is restarted, the previous consumer stays in the group until the session timeout expires
expected_members=$3
timeout_s=$4
interval_s=${5:-5}
kubectl exec -it $kafka_pod -n $namespace -- bash -c '
export KAFKA_OPTS=
consumer_group=$1
expected_members=$2
timeout_s=$3
interval_s=$4
start_time=$(date +%s)
while true; do
# The state becomes "Stable" when no rebalance is happening and at least one consumer is connected
state=$(kafka-consumer-groups.sh --bootstrap-server localhost:9092 --describe --group $consumer_group --state | awk '"'"'NF>1 && $(NF-1) != "STATE" {print (NF>1?$(NF-1):"None")} {next}'"'"')
members=$(kafka-consumer-groups.sh --bootstrap-server localhost:9092 --describe --group $consumer_group --state | awk '"'"'NF>1 && $NF != "#MEMBERS" {print (NF>1?$NF:"None")} {next}'"'"')
echo "Consumer group $consumer_group state: $state, members: $members"
if [ "$state" == "Stable" ] && [ "$members" -eq "$expected_members" ]; then
echo "Consumer group $consumer_group is now consuming."
exit 0
fi
# Check if we have reached the timeout
current_time=$(date +%s)
elapsed_time=$((current_time - start_time))
if [ "$elapsed_time" -ge "$timeout_s" ]; then
echo "Error: Timed out waiting for consumer group $consumer_group to start consuming."
exit 1
fi
sleep $interval_s
done
' -- "$consumer_group" "$expected_members" "$timeout_s" "$interval_s"
}
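
For context, a minimal usage sketch of the three helpers added above (the host, UUID, and replica values here are hypothetical; the real call sites are in configure-e2e.sh and deploy-metadata.sh below):

# wait up to 60s for a single endpoint to accept TCP connections
wait_for_endpoint 10.0.0.12 9000 60
# wait for every metadata-repd pod in the "metadata" namespace to serve its 91xx admin port
wait_for_all_pods_behind_services metadata-repd metadata "91*" 60
# wait up to 300s for 2 consumers to join the ingestion consumer group in the "zenko" namespace
wait_for_consumer_group zenko "$UUID.backbeat-ingestion-group" 2 300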
2 changes: 1 addition & 1 deletion .github/scripts/end2end/configs/zenko.yaml
@@ -34,7 +34,7 @@ spec:
ingestionProcessor:
concurrency: 2
logging:
logLevel: trace
logLevel: debug
mongodb:
provider: External
endpoints:
7 changes: 7 additions & 0 deletions .github/scripts/end2end/configure-e2e.sh
@@ -109,3 +109,10 @@ sleep 10

kubectl wait --for condition=DeploymentFailure=false --timeout 25m -n ${NAMESPACE} zenko/${ZENKO_NAME}
kubectl wait --for condition=DeploymentInProgress=false --timeout 25m -n ${NAMESPACE} zenko/${ZENKO_NAME}


if [ $ENABLE_RING_TESTS = true ]; then
# wait for ingestion processor to start consuming from Kafka
Contributor comment:
this should (eventually) get baked into backbeat?
--> so we should probably add a ticket (and we can put it in the EPIC "cleanup kafka management" https://scality.atlassian.net/browse/ARTESCA-9180)

ingestion_processor_replicas=$(kubectl -n $NAMESPACE get deploy/end2end-backbeat-ingestion-processor -o jsonpath='{.spec.replicas}')
wait_for_consumer_group $NAMESPACE $UUID.backbeat-ingestion-group $ingestion_processor_replicas 300
fi
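
To inspect the consumer group state by hand, the same kafka-consumer-groups.sh call the helper relies on can be run directly (a sketch, reusing the pod selector from common.sh):

# find the first kafka broker pod and describe the ingestion consumer group
kafka_pod=$(kubectl get pods -n $NAMESPACE -l brokerId=0,kafka_cr=end2end-base-queue,app=kafka -o jsonpath='{.items[0].metadata.name}')
kubectl exec -n $NAMESPACE $kafka_pod -- kafka-consumer-groups.sh \
    --bootstrap-server localhost:9092 --describe \
    --group "$UUID.backbeat-ingestion-group" --state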
47 changes: 47 additions & 0 deletions .github/scripts/end2end/deploy-metadata.sh
@@ -0,0 +1,47 @@
#!/bin/sh

set -exu

. "$(dirname $0)/common.sh"

# create a separate namespace for metadata
kubectl create namespace metadata

# clone the metadata repository
git init metadata
cd metadata
git fetch --depth 1 --no-tags https://${GIT_ACCESS_TOKEN}@github.com/scality/metadata.git
git checkout FETCH_HEAD

# install metadata chart in a separate namespace
cd helm
helm dependency update cloudserver/
helm install -n metadata \
--set metadata.persistentVolume.storageClass='' \
--set metadata.sproxyd.persistentVolume.storageClass='' \
s3c cloudserver/

# wait for the repd pods to be created
kubectl -n metadata rollout status --watch --timeout=300s statefulset/s3c-metadata-repd
# wait for all repd pods to start serving admin API ports
wait_for_all_pods_behind_services metadata-repd metadata "91*" 60

# The current chart uses an old version of bucketd that has trouble reconnecting to repd
# when bucketd starts first, so restart bucketd once repd is ready.
kubectl -n metadata rollout restart deployment/s3c-metadata-bucketd
# wait for the bucketd pods to be created
kubectl -n metadata rollout status --watch --timeout=300s deploy/s3c-metadata-bucketd
# wait for all bucketd pods to start serving port 9000
wait_for_all_pods_behind_services metadata-bucketd metadata 9000 60

# manually add "s3c.local" to the rest endpoints list as it's not configurable in the chart
current_config=$(kubectl get configmap/s3c-cloudserver-config-json -n metadata -o jsonpath='{.data.config\.json}')
updated_config=$(echo "$current_config" | jq '.restEndpoints["s3c.local"] = "us-east-1"')
kubectl patch configmap/s3c-cloudserver-config-json -n metadata --type='merge' -p="$(jq -n --arg v "$updated_config" '{"data": {"config.json": $v}}')"

# restarting cloudserver to take the new configmap changes into account
kubectl -n metadata rollout restart deployment/s3c-cloudserver
# wait for the cloudserver pods to be created
kubectl -n metadata rollout status --watch --timeout=300s deployment/s3c-cloudserver
# wait for the cloudserver pods to start serving port 8000
wait_for_all_pods_behind_services cloudserver metadata 8000 60
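
A quick way to confirm the configmap patch took effect (a sketch, reusing the same jsonpath as in the script above):

# print the rest endpoint entry that was added for s3c.local
kubectl get configmap/s3c-cloudserver-config-json -n metadata \
    -o jsonpath='{.data.config\.json}' | jq '.restEndpoints["s3c.local"]'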
1 change: 1 addition & 0 deletions .github/scripts/end2end/patch-coredns.sh
@@ -36,6 +36,7 @@ corefile="
rewrite name exact prom.dr.zenko.local ingress-nginx-controller.ingress-nginx.svc.cluster.local
rewrite name exact shell-ui.dr.zenko.local ingress-nginx-controller.ingress-nginx.svc.cluster.local
rewrite name exact website.mywebsite.com ingress-nginx-controller.ingress-nginx.svc.cluster.local
rewrite name exact s3c.local s3c-cloudserver.metadata.svc.cluster.local
kubernetes cluster.local in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
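
To check that the new rewrite resolves inside the cluster, a throwaway pod can be used (a sketch, assuming the busybox image is pullable in the CI cluster):

# resolve s3c.local from within the cluster; it should point at the s3c-cloudserver service
kubectl run dns-check --image=busybox --rm -it --restart=Never -- nslookup s3c.local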
18 changes: 7 additions & 11 deletions .github/workflows/end2end.yaml
@@ -76,11 +76,11 @@ env:
GCP_BACKEND_SERVICE_EMAIL: ${{ secrets.GCP_BACKEND_SERVICE_EMAIL }}
# Enable this for Ring tests
ENABLE_RING_TESTS: "false"
RING_S3C_ACCESS_KEY: ${{ secrets.RING_S3C_BACKEND_ACCESS_KEY }}
RING_S3C_SECRET_KEY: ${{ secrets.RING_S3C_BACKEND_SECRET_KEY }}
RING_S3C_ENDPOINT: ${{ secrets.RING_S3C_BACKEND_ENDPOINT }}
RING_S3C_ACCESS_KEY: accessKey1
RING_S3C_SECRET_KEY: verySecretKey1
RING_S3C_ENDPOINT: http://s3c.local:8000
RING_S3C_BACKEND_SOURCE_LOCATION: rings3cbackendingestion
RING_S3C_INGESTION_SRC_BUCKET_NAME: ingestion-test-src-bucket-${{ github.run_id }}-${{ github.run_attempt }}
RING_S3C_INGESTION_SRC_BUCKET_NAME: ingestion-test-src-bucket-${{ github.run_id }}
# CTST end2end tests
NOTIF_DEST_NAME: "destination1"
NOTIF_DEST_TOPIC: "destination-topic-1"
@@ -320,9 +320,6 @@ jobs:
run: |-
cd tests/zenko_tests
envsubst < 'e2e-config.yaml.template' > 'e2e-config.yaml'
if [[ "${ENABLE_RING_TESTS}" == "false" ]]; then
yq -i 'del(.locations[] | select(.locationType == "location-scality-ring-s3-v1"))' e2e-config.yaml
fi
cat e2e-config.yaml
echo 'Generated e2e-config.yaml file'
- name: Build and push CI image
@@ -556,8 +553,9 @@ jobs:
needs: [build-kafka, build-test-image]
runs-on:
- ubuntu-22.04-8core
# Enable this for Ring-based tests
# - scality-cloud
env:
ENABLE_RING_TESTS: "true"
GIT_ACCESS_TOKEN: ${{ secrets.GIT_ACCESS_TOKEN }}
steps:
- name: Checkout
uses: actions/checkout@v4
@@ -573,8 +571,6 @@
registry: ghcr.io
- name: Deploy Zenko
uses: ./.github/actions/deploy
env:
GIT_ACCESS_TOKEN: ${{ secrets.GIT_ACCESS_TOKEN }}
- name: Run backbeat end to end tests
run: bash run-e2e-test.sh "end2end" ${E2E_IMAGE_NAME}:${E2E_IMAGE_TAG} "backbeat" "default"
working-directory: ./.github/scripts/end2end
6 changes: 6 additions & 0 deletions tests/zenko_tests/e2e_config/locations.py
@@ -1,6 +1,7 @@
#!/usr/bin/env python

import logging
import os

_log = logging.getLogger("end2end configuration")

@@ -11,6 +12,11 @@ def create_location(client, uuid, location):
:param uuid: zenko instance uuid
:param location: location details
"""

ENABLE_RING_TESTS = os.environ['ENABLE_RING_TESTS']
if ENABLE_RING_TESTS == "false" and location["locationType"] == "location-scality-ring-s3-v1":
return

try:
Location_V1 = client.get_model('location-v1')
if "bootstrapList" not in location["details"]:
@@ -93,7 +93,6 @@ describe('Ingestion pause resume', function () {
'non-existent-location',
(err, data) => {
assert.ifError(err);
assert.strictEqual(data.code, 404);
assert.strictEqual(data.RouteNotFound, true);
return done();
},
4 changes: 2 additions & 2 deletions tests/zenko_tests/node_tests/package.json
@@ -46,14 +46,14 @@
"test_expiration": "mocha --tags ${MOCHA_TAGS} --exit -t 900000 --reporter mocha-multi-reporters --reporter-options configFile=config.json backbeat/tests/lifecycle/expiration.js",
"test_transition": "mocha --tags ${MOCHA_TAGS} --exit -t 900000 --reporter mocha-multi-reporters --reporter-options configFile=config.json backbeat/tests/lifecycle/transition.js",
"test_lifecycle": "mocha --tags ${MOCHA_TAGS} --exit -t 1800000 --reporter mocha-multi-reporters --reporter-options configFile=config.json --recursive backbeat/tests/lifecycle",
"test_ingestion_oob_s3c": "mocha --tags ${MOCHA_TAGS} --exit -t 100000 --reporter mocha-multi-reporters --reporter-options configFile=config.json --recursive backbeat/tests/ingestion",
"test_ingestion_oob_s3c": "mocha --tags ${MOCHA_TAGS} --exit -t 180000 --reporter mocha-multi-reporters --reporter-options configFile=config.json --recursive backbeat/tests/ingestion",
"test_location_quota": "mocha --tags ${MOCHA_TAGS} --exit -t 10000 --reporter mocha-multi-reporters --reporter-options configFile=config.json --recursive cloudserver/locationQuota/tests",
"test_bucket_get_v2": "mocha --tags ${MOCHA_TAGS} --exit -t 10000 --reporter mocha-multi-reporters --reporter-options configFile=config.json --recursive cloudserver/bucketGetV2/tests",
"test_bucket_policy": "mocha --tags ${MOCHA_TAGS} --exit -t 10000 --reporter mocha-multi-reporters --reporter-options configFile=config.json --recursive cloudserver/bucketPolicy/tests",
"test_operator": "mocha --tags ${MOCHA_TAGS} --exit -t 10000 --reporter mocha-multi-reporters --reporter-options configFile=config.json ./init_test.js",
"test_smoke": "mocha --tags ${MOCHA_TAGS} --exit -t 10000 --reporter mocha-multi-reporters --reporter-options configFile=config.json --recursive smoke_tests",
"test_iam_policies": "mocha --tags ${MOCHA_TAGS} --exit -t 15000 --reporter mocha-multi-reporters --reporter-options configFile=config.json --recursive iam_policies",
"test_all_extensions": "run-p --aggregate-output test_aws_crr test_expiration test_transition",
"test_all_extensions": "run-p --aggregate-output test_aws_crr test_expiration test_transition test_ingestion_oob_s3c",
"test_object_api": "mocha --tags ${MOCHA_TAGS} --exit -t 10000 --reporter mocha-multi-reporters --reporter-options configFile=config.json --recursive cloudserver/keyFormatVersion/tests",
"lint": "eslint $(find . -name '*.js' -not -path '*/node_modules/*')"
},
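
To run the re-enabled ingestion suite on its own, the new script can be invoked directly (a sketch; the MOCHA_TAGS value is a placeholder, and the suite assumes the usual e2e credentials and endpoint environment variables are already set):

cd tests/zenko_tests/node_tests
MOCHA_TAGS="<tags>" npm run test_ingestion_oob_s3c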