add debug action
eaudetcobello committed Sep 11, 2024
1 parent 0f2f9c5 commit 8473956
Showing 3 changed files with 39 additions and 20 deletions.
18 changes: 13 additions & 5 deletions .github/workflows/e2e-deleteme.yaml
@@ -5,11 +5,12 @@ on:

permissions:
contents: read
id-token: write

jobs:
run-e2e-tests:
name: Run E2E Tests
runs-on: [self-hosted, linux, X64, jammy, large]
runs-on: ubuntu-latest
strategy:
matrix:
ginkgo_focus:
@@ -19,8 +20,19 @@ jobs:
#- "Workload cluster scaling"
#- "Workload cluster upgrade"
steps:
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v4
with:
audience: sts.amazonaws.com
aws-region: us-east-2
role-to-assume: arn:aws:iam::018302341396:role/GithubOIDC
role-duration-seconds: 3600
- name: Check out repo
uses: actions/checkout@v4
- name: Setup tmate session
uses: mxschmitt/action-tmate@v3
with:
detached: true
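# Note: detached mode keeps the job running; the tmate SSH/web connection string is printed in this step's log.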
- name: Install requirements
run: |
sudo apt install make
@@ -35,7 +47,3 @@ jobs:
sudo -E ./hack/ci-e2e-tests.sh true aws v0.1.2
env:
GOPROXY: direct
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_ACCOUNT_ID: ${{ secrets.AWS_ACCOUNT_ID }}
AWS_REGION: us-east-2
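
With the job now authenticating through the OIDC role above instead of the long-lived secrets removed here, the identity can be checked from inside the detached tmate session. A minimal sketch, assuming the AWS CLI is available on the runner (ubuntu-latest images normally ship it):

# Run inside the tmate session on the runner to confirm the assumed role.
aws sts get-caller-identity
# Expected (roughly): an assumed-role ARN under arn:aws:iam::018302341396:role/GithubOIDC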
35 changes: 23 additions & 12 deletions hack/ci-e2e-tests.sh
@@ -1,6 +1,6 @@
#!/bin/bash

set -xe
set -ve

# This script is used to run e2e tests for the CK8s CAPI.
# It sets up an LXD container, installs the CK8s management cluster, and runs e2e tests.
@@ -20,8 +20,8 @@ readonly SKIP_CLEANUP=${1:-true}
readonly INFRA_PROVIDER=${2:-aws}
readonly CK8S_PROVIDER_VERSION=${3:-v0.1.2}

readonly LXD_CHANNEL="5.21/stable"
readonly LXC_IMAGE="ubuntu:20.04"
readonly LXD_CHANNEL="6.1/stable"
readonly LXC_IMAGE="ubuntu:22.04"
readonly K8S_PROFILE_URL="https://raw.githubusercontent.com/canonical/k8s-snap/main/tests/integration/lxd-profile.yaml"
readonly K8S_PROFILE_PATH="/tmp/k8s.profile"
readonly CONTAINER_NAME="k8s-test"
@@ -51,16 +51,28 @@ function exec_in_container {
lxc exec $CONTAINER_NAME -- bash -c "$1"
}

# Allow lxdbr0 traffic through Docker's DOCKER-USER chain (only when the chain exists)
function setup_firewall {
if sudo iptables -L DOCKER-USER; then
sudo iptables -I DOCKER-USER -i lxdbr0 -j ACCEPT
sudo iptables -I DOCKER-USER -o lxdbr0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
fi
}

# Install or refresh the LXD snap
function install_lxd {
sudo snap install lxd --channel=$LXD_CHANNEL
if snap list lxd; then
sudo snap refresh lxd --channel=$LXD_CHANNEL
else
sudo snap install lxd --channel=$LXD_CHANNEL
fi
sudo lxd waitready
sudo lxd init --auto
sudo usermod --append --groups lxd "$USER"
}

# Create or ensure the k8s profile exists
function setup_lxd_profile {
lxc profile create k8s || true
lxc profile show k8s || lxc profile create k8s
wget -q $K8S_PROFILE_URL -O $K8S_PROFILE_PATH
cat $K8S_PROFILE_PATH | lxc profile edit k8s
rm -f $K8S_PROFILE_PATH
@@ -69,12 +81,14 @@ function setup_lxd_profile {
# Setup and configure the container
function setup_container {
lxc launch $LXC_IMAGE $CONTAINER_NAME -p default -p k8s
# Wait for container to be ready to run commands
until exec_in_container true; do
sleep 1
done

exec_in_container "apt update && apt install -y snapd"
exec_in_container "systemctl start snapd"
exec_in_container "snap wait core seed.loaded"

# The script runs from the hack directory, so push the entire parent directory to the container
lxc file push -r .. $CONTAINER_NAME/root/
@@ -85,25 +99,21 @@ function configure_container_env {
# Check for clusterawsadm binary
exec_in_container "which clusterawsadm" || error_exit "clusterawsadm binary not found in container"

set +x
lxc config set $CONTAINER_NAME environment.AWS_REGION "$AWS_REGION"
lxc config set $CONTAINER_NAME environment.AWS_SECRET_ACCESS_KEY "$AWS_SECRET_ACCESS_KEY"
lxc config set $CONTAINER_NAME environment.AWS_ACCESS_KEY_ID "$AWS_ACCESS_KEY_ID"

local aws_creds
aws_creds=$(lxc exec "$CONTAINER_NAME" -- bash -c "clusterawsadm bootstrap credentials encode-as-profile")

lxc config set "$CONTAINER_NAME" environment.AWS_B64ENCODED_CREDENTIALS "$aws_creds"
set -x
lxc config set "$CONTAINER_NAME" environment.AWS_B64ENCODED_CREDENTIALS "$AWS_B64ENCODED_CREDENTIALS"
fi
}

# Main installation and configuration
function setup_management_cluster {
sleep 5
exec_in_container "snap install k8s --classic --edge"
sleep 1
exec_in_container "snap install go --classic"
exec_in_container "sudo snap install k8s --classic --edge"
exec_in_container "sudo snap install go --classic"
exec_in_container "mkdir -p /root/.kube"
exec_in_container "sudo k8s bootstrap"
exec_in_container "sudo k8s status --wait-ready"
@@ -176,6 +186,7 @@ function main {
check_required_env_vars
install_lxd
setup_lxd_profile
setup_firewall
setup_container
setup_management_cluster
clone_repos
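The credential plumbing now flows through a single variable: the script forwards AWS_B64ENCODED_CREDENTIALS from its own environment into the container instead of encoding keys with clusterawsadm inside it. A rough sketch of a manual run under that assumption (clusterawsadm installed on the host and AWS credentials/region already configured in the environment are assumed; the argument order mirrors the workflow call):

export GOPROXY=direct
# Encode the local AWS credentials the same way the old in-container step did.
export AWS_B64ENCODED_CREDENTIALS="$(clusterawsadm bootstrap credentials encode-as-profile)"
# Args: SKIP_CLEANUP INFRA_PROVIDER CK8S_PROVIDER_VERSION
sudo -E ./hack/ci-e2e-tests.sh true aws v0.1.2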
6 changes: 3 additions & 3 deletions test/e2e/config/ck8s-aws.yaml
@@ -32,7 +32,7 @@ providers:
# By default, will use the latest version defined in ../data/shared/v1beta1/metadata.yaml
# to init the management cluster
- name: v2.6.1 # used during e2e-test
value: "../../../../cluster-api-provider-aws/config/default"
value: "../../../../cluster-api-provider-aws/config/default" # TODO don't use relative path
contract: v1beta2
files:
- sourcePath: "../data/shared/v1beta1_aws/metadata.yaml"
@@ -46,7 +46,7 @@ providers:
# default version for docker infrastructure provider
# name here should match defaultProviderVersion
- name: v1.9.99
value: "../../../../cluster-api-provider-aws/config/default"
value: "../../../../cluster-api-provider-aws/config/default" # TODO don't use relative path
contract: v1beta2
files:
- sourcePath: "../data/shared/v1beta1_aws/metadata.yaml"
@@ -95,7 +95,7 @@ variables:
AWS_NODE_INSTANCE_TYPE: t3.large
AWS_PUBLIC_IP: false
AWS_CREATE_BASTION: true
AWS_SSH_KEY_NAME: "etienne"
AWS_SSH_KEY_NAME: ""
AWS_AMI_ID: "ami-05145146e3a9db6f3"
AWS_CONTROL_PLANE_ROOT_VOLUME_SIZE: 16
AWS_NODE_ROOT_VOLUME_SIZE: 16
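The variables above are defaults for the AWS test run; with the standard Cluster API e2e framework, a variable exported in the environment generally takes precedence over the value in this file, which is one way to supply a real key pair now that AWS_SSH_KEY_NAME defaults to empty. A hedged sketch (the key pair name is made up):

# Hypothetical overrides for a local run; exported values usually win over ck8s-aws.yaml.
export AWS_SSH_KEY_NAME="ci-e2e-keypair"   # made-up EC2 key pair registered in us-east-2
export AWS_CREATE_BASTION=false            # skip the bastion host for a quicker run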
