# Merge pull request #127 from CongMinYin/wip-discovery-controller
name: "CI" | |
on: | |
push: | |
branches: | |
- '*' | |
tags: | |
- 'v*' | |
pull_request: | |
workflow_dispatch: | |
release: | |
types: | |
- created | |
env: | |
WAIT_INTERVAL_SECS: 1 | |
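  # Polling interval (in seconds) used by the wait loops below when probing the
  # ceph container health status and the gateway/discovery controller ports.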
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
        with:
          # git submodule update --init --recursive
          submodules: recursive
      - name: Build container images
        run: make build
      - name: Save container images
        run: |
          . .env
          docker save $QUAY_NVMEOF:$NVMEOF_VERSION > nvmeof.tar
          docker save $QUAY_NVMEOFCLI:$NVMEOF_VERSION > nvmeof-cli.tar
          docker save $QUAY_CEPH:$CEPH_VERSION > vstart-cluster.tar
          docker save bdevperf > bdevperf.tar
          docker save nvmeof-devel > nvmeof-devel.tar
      - name: Upload container images
        uses: actions/upload-artifact@v3
        with:
          name: ceph_nvmeof_container_images-${{ github.run_number }}
          path: |
            nvmeof.tar
            nvmeof-cli.tar
            nvmeof-devel.tar
            vstart-cluster.tar
            bdevperf.tar
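      # The images are built once here, exported with `docker save`, and shared with the
      # downstream pytest/demo/discovery jobs as a run artifact, so those jobs only need
      # `docker load` instead of rebuilding. Image names and tags come from the .env file.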
      - name: Build stand-alone packages (RPMs and Python wheel)
        id: build-standalone-packages
        run: |
          export EXPORT_DIR=$(mktemp -d)
          make export-rpms
          make export-python
          echo "EXPORT_DIR=$EXPORT_DIR" >> "$GITHUB_ENV"
      - name: Upload stand-alone packages
        uses: actions/upload-artifact@v3
        with:
          name: ceph_nvmeof_standalone_packages-${{ github.run_number }}
          path: |
            ${{ env.EXPORT_DIR }}/**
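      # EXPORT_DIR is a temp dir created at build time; writing it to $GITHUB_ENV makes it
      # available to the upload step above as ${{ env.EXPORT_DIR }}. The export-rpms and
      # export-python make targets presumably place their output there.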
  pytest:
    needs: build
    strategy:
      fail-fast: false
      matrix:
        test: ["cli", "state", "multi_gateway", "server"]
    runs-on: ubuntu-latest
    env:
      HUGEPAGES: 512 # for multi gateway test, approx 256 per gateway instance
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
      - name: Setup huge pages
        run: |
          make setup HUGEPAGES=$HUGEPAGES
      - name: Download container images
        uses: actions/download-artifact@v3
        with:
          name: ceph_nvmeof_container_images-${{ github.run_number }}
      - name: Load container images
        run: |
          docker load < nvmeof-devel.tar
          docker load < vstart-cluster.tar
      - name: Start ceph cluster
        run: |
          make up SVC=ceph OPTS="--detach"
      - name: Wait for the ceph cluster container to become healthy
        timeout-minutes: 3
        run: |
          while true; do
            container_status=$(docker inspect --format='{{.State.Health.Status}}' ceph)
            if [[ $container_status == "healthy" ]]; then
              # success
              exit 0
            else
              # Wait for a specific time before checking again
              sleep ${{ env.WAIT_INTERVAL_SECS }}
              echo -n .
            fi
          done
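      # The loop polls the Docker healthcheck status of the "ceph" container (the
      # healthcheck itself is presumably defined for that service in the compose file)
      # every WAIT_INTERVAL_SECS; the step-level timeout-minutes caps how long we wait.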
      - name: Create RBD image
        run: |
          echo "💁 ceph list pools:"
          make exec SVC=ceph OPTS="-T" CMD="ceph osd lspools"
          echo "💁 rbd create:"
          make exec SVC=ceph OPTS="-T" CMD="rbd create rbd/mytestdevimage --size 16"
          echo "💁 ls rbd:"
          make exec SVC=ceph OPTS="-T" CMD="rbd ls rbd"
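      # Creates a small (16 MiB) RBD image named mytestdevimage in the "rbd" pool;
      # the pytest cases presumably use it as the backing image for gateway bdevs.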
      - name: Run ${{ matrix.test }} test
        run: |
          # Run the test code from the current checkout
          # Managing pytest’s output: https://docs.pytest.org/en/7.1.x/how-to/output.html
          make protoc
          make run SVC="nvmeof-devel" OPTS="--volume=$(pwd)/tests:/src/tests --entrypoint=python3" CMD="-m pytest --show-capture=all -s --full-trace -vv -rA tests/test_${{ matrix.test }}.py"
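      # The tests run inside the nvmeof-devel image with the checked-out tests/ directory
      # bind-mounted over /src/tests and the entrypoint overridden to python3, so the
      # selected tests/test_<matrix.test>.py module is executed with pytest.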
      - name: Check coredump existence
        if: success() || failure()
        id: check_coredumps
        uses: andstor/file-existence-action@20b4d2e596410855db8f9ca21e96fbe18e12930b # v2, pinned to SHA for security reasons
        with:
          files: "/tmp/coredump/core.*"
      - name: Upload ${{ matrix.test }} test core dumps
        if: steps.check_coredumps.outputs.files_exists == 'true'
        uses: actions/upload-artifact@v3
        with:
          name: ceph_nvmeof_pytest_${{ matrix.test }}_cores-${{ github.run_number }}
          path: |
            /tmp/coredump/core.*
      - name: Display logs
        if: success() || failure()
        run: |
          make logs OPTS=""
      - name: Tear down
        if: success() || failure()
        run: |
          make down
          make clean
  demo:
    needs: build
    runs-on: ubuntu-latest
    env:
      HUGEPAGES: 512
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
      - name: Setup huge-pages
        run: make setup HUGEPAGES=$HUGEPAGES
      - name: Download container images
        uses: actions/download-artifact@v3
        with:
          name: ceph_nvmeof_container_images-${{ github.run_number }}
      - name: Load container images
        run: |
          docker load < nvmeof.tar
          docker load < nvmeof-cli.tar
          docker load < vstart-cluster.tar
          docker load < bdevperf.tar
      - name: Start containers
        run: |
          make up OPTS=--detach
      - name: Wait for the Gateway to be listening
        timeout-minutes: 3
        run: |
          . .env
          echo using gateway $NVMEOF_IP_ADDRESS port $NVMEOF_GW_PORT
          until nc -z $NVMEOF_IP_ADDRESS $NVMEOF_GW_PORT; do
            echo -n .
            sleep ${{ env.WAIT_INTERVAL_SECS }}
          done
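      # nc -z only probes that the gateway's control (gRPC) port accepts TCP connections;
      # it does not verify the control plane works, which the CLI and demo steps below cover.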
      - name: List containers
        if: success() || failure()
        run: make ps
      - name: List processes
        if: success() || failure()
        run: make top
      - name: Test
        run: |
          make demo OPTS=-T
      - name: Get subsystems
        run: |
          # https://github.com/actions/toolkit/issues/766
          shopt -s expand_aliases
          eval $(make alias)
          nvmeof-cli get_subsystems
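      # `make alias` presumably prints an alias definition for nvmeof-cli; since
      # non-interactive bash does not expand aliases by default, expand_aliases is
      # enabled first (see the linked actions/toolkit issue).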
      - name: Run bdevperf
        run: |
          # see https://spdk.io/doc/nvmf_multipath_howto.html
          . .env
          echo -n "ℹ️ Starting bdevperf container"
          make up SVC=bdevperf OPTS="--detach"
          sleep 10
          echo "ℹ️ bdevperf start up logs"
          make logs SVC=bdevperf
          eval $(make run SVC=bdevperf OPTS="--entrypoint=env" | grep BDEVPERF_SOCKET | tr -d '\n\r' )
          rpc="/usr/libexec/spdk/scripts/rpc.py"
          echo "ℹ️ bdevperf bdev_nvme_set_options"
          make exec SVC=bdevperf OPTS=-T CMD="$rpc -v -s $BDEVPERF_SOCKET bdev_nvme_set_options -r -1"
          echo "ℹ️ bdevperf tcp connect ip: $NVMEOF_IP_ADDRESS port: $NVMEOF_IO_PORT nqn: $NQN"
          make exec SVC=bdevperf OPTS=-T CMD="$rpc -v -s $BDEVPERF_SOCKET bdev_nvme_attach_controller -b Nvme0 -t tcp -a $NVMEOF_IP_ADDRESS -s $NVMEOF_IO_PORT -f ipv4 -n $NQN -l -1 -o 10"
          echo "ℹ️ bdevperf perform_tests"
          eval $(make run SVC=bdevperf OPTS="--entrypoint=env" | grep BDEVPERF_TEST_DURATION | tr -d '\n\r' )
          timeout=$(expr $BDEVPERF_TEST_DURATION \* 2)
          bdevperf="/usr/libexec/spdk/scripts/bdevperf.py"
          make exec SVC=bdevperf OPTS=-T CMD="$bdevperf -v -t $timeout -s $BDEVPERF_SOCKET perform_tests"
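      # Flow: read the RPC socket path and test duration from the bdevperf container's
      # environment, attach an NVMe/TCP controller to the gateway's I/O port for $NQN,
      # then run perform_tests via bdevperf.py, allowing twice the configured test
      # duration before timing out.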
      - name: Check coredump existence
        if: success() || failure()
        id: check_coredumps
        uses: andstor/file-existence-action@20b4d2e596410855db8f9ca21e96fbe18e12930b # v2, pinned to SHA for security reasons
        with:
          files: "/tmp/coredump/core.*"
      - name: Upload demo core dumps
        if: steps.check_coredumps.outputs.files_exists == 'true'
        uses: actions/upload-artifact@v3
        with:
          name: ceph_nvmeof_demo_cores-${{ github.run_number }}
          path: |
            /tmp/coredump/core.*
      # For debugging purposes (provides an SSH connection to the runner)
      #- name: Setup tmate session
      #  uses: mxschmitt/action-tmate@v3
      #  with:
      #    limit-access-to-actor: true
      - name: Display logs
        if: success() || failure()
        run: make logs OPTS=''
      - name: Tear down
        if: success() || failure()
        run: |
          make down
          make clean
  discovery:
    needs: build
    runs-on: ubuntu-latest
    env:
      HUGEPAGES: 768 # 3 spdk instances
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
      - name: Setup huge-pages
        run: make setup HUGEPAGES=$HUGEPAGES
      - name: Download container images
        uses: actions/download-artifact@v3
        with:
          name: ceph_nvmeof_container_images-${{ github.run_number }}
      - name: Load container images
        run: |
          docker load < nvmeof.tar
          docker load < nvmeof-cli.tar
          docker load < vstart-cluster.tar
          docker load < bdevperf.tar
      - name: Start discovery controller
        run: |
          docker-compose up --detach discovery
      - name: Wait for discovery controller to be listening
        timeout-minutes: 3
        run: |
          . .env
          container_ip() {
            docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "$1"
          }
          ip=$(container_ip $DISC1)
          echo using discovery controller $ip $NVMEOF_DISC_PORT
          until nc -z $ip $NVMEOF_DISC_PORT; do
            echo -n .
            sleep ${{ env.WAIT_INTERVAL_SECS }}
          done
      - name: Start gateway with scale=2
        run: |
          docker-compose up --detach --scale nvmeof=2 nvmeof
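      # --scale nvmeof=2 starts two gateway containers from the same compose service;
      # $GW1 and $GW2 (from .env) are presumably the container names compose assigns to them.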
      - name: Wait for gateways to be listening
        timeout-minutes: 3
        run: |
          . .env
          container_ip() {
            docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "$1"
          }
          for gw in $GW1 $GW2; do
            ip=$(container_ip $gw)
            echo using gateway $ip $NVMEOF_GW_PORT
            until nc -z $ip $NVMEOF_GW_PORT; do
              echo -n .
              sleep ${{ env.WAIT_INTERVAL_SECS }}
            done
            echo
          done
      - name: List containers
        if: success() || failure()
        run: |
          docker-compose ps
      - name: List processes
        if: success() || failure()
        run: |
          docker-compose top
      - name: Create RBD image
        run: |
          make rbd OPTS=-T
      - name: Set up target
        run: |
          . .env
          container_ip() {
            docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "$1"
          }
          # The container id is the default hostname in a docker environment,
          # i.e. the default gateway name
          container_id() {
            docker ps -q -f name=$1
          }
          cli_gw() {
            gw=$1
            shift
            docker-compose run --rm nvmeof-cli --server-address $gw --server-port $NVMEOF_GW_PORT "$@"
          }
          gw1=$(container_ip $GW1)
          echo ℹ️ Using GW RPC $GW1 address $gw1 port $NVMEOF_GW_PORT
          cli_gw $gw1 get_subsystems
          cli_gw $gw1 create_bdev --pool $RBD_POOL --image $RBD_IMAGE_NAME --bdev $BDEV_NAME
          cli_gw $gw1 create_subsystem --subnqn $NQN --serial $SERIAL
          cli_gw $gw1 add_namespace --subnqn $NQN --bdev $BDEV_NAME
          for gw in $GW1 $GW2; do
            ip=$(container_ip $gw)
            name=$(container_id $gw) # default hostname - container id
            echo ℹ️ Create listener address $ip gateway $name
            cli_gw $ip create_listener --subnqn $NQN --gateway-name $name --traddr $ip --trsvcid $NVMEOF_IO_PORT
          done
          cli_gw $gw1 add_host --subnqn $NQN --host "*"
          for gw in $GW1 $GW2; do
            ip=$(container_ip $gw)
            echo ℹ️ Subsystems for gateway $gw ip $ip
            cli_gw $ip get_subsystems
          done
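      # Target setup: the bdev (backed by the RBD image), the subsystem and its namespace
      # are created once via GW1; listeners are created per gateway through that gateway's
      # own RPC address, using its container id as the gateway name; host access is left
      # open ("*"). Querying get_subsystems on both gateways afterwards checks that each
      # one sees the configuration, which is presumably shared through the Ceph-backed state.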
      - name: Run bdevperf discovery
        run: |
          # See
          # - https://github.com/spdk/spdk/blob/master/doc/jsonrpc.md
          # - https://spdk.io/doc/nvmf_multipath_howto.html
          . .env
          container_ip() {
            docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "$1"
          }
          echo -n "ℹ️ Starting bdevperf container"
          make up SVC=bdevperf OPTS="--detach"
          sleep 10
          echo "ℹ️ bdevperf start up logs"
          make logs SVC=bdevperf
          eval $(make run SVC=bdevperf OPTS="--entrypoint=env" | grep BDEVPERF_SOCKET | tr -d '\n\r' )
          ip=$(container_ip $DISC1)
          rpc="/usr/libexec/spdk/scripts/rpc.py"
          echo "ℹ️ bdevperf bdev_nvme_set_options"
          make exec SVC=bdevperf OPTS=-T CMD="$rpc -v -s $BDEVPERF_SOCKET bdev_nvme_set_options -r -1"
          echo "ℹ️ bdevperf start discovery ip: $ip port: $NVMEOF_DISC_PORT"
          # -l -1 -o 10
          make exec SVC=bdevperf OPTS=-T CMD="$rpc -v -s $BDEVPERF_SOCKET bdev_nvme_start_discovery -b Nvme0 -t tcp -a $ip -s $NVMEOF_DISC_PORT -f ipv4 -w"
          echo "ℹ️ bdevperf bdev_nvme_get_discovery_info"
          make exec SVC=bdevperf OPTS=-T CMD="$rpc -v -s $BDEVPERF_SOCKET bdev_nvme_get_discovery_info"
          echo "ℹ️ bdevperf perform_tests"
          eval $(make run SVC=bdevperf OPTS="--entrypoint=env" | grep BDEVPERF_TEST_DURATION | tr -d '\n\r' )
          timeout=$(expr $BDEVPERF_TEST_DURATION \* 2)
          bdevperf="/usr/libexec/spdk/scripts/bdevperf.py"
          make exec SVC=bdevperf OPTS=-T CMD="$bdevperf -v -t $timeout -s $BDEVPERF_SOCKET perform_tests"
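      # Unlike the demo job, bdevperf connects to the discovery controller rather than to a
      # gateway directly: bdev_nvme_start_discovery (with -w waiting for the initial attach)
      # follows the returned discovery log page entries, which should include the listeners
      # on both gateways, giving bdevperf multiple paths to the same namespace.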
      - name: Check coredump existence
        if: success() || failure()
        id: check_coredumps
        uses: andstor/file-existence-action@20b4d2e596410855db8f9ca21e96fbe18e12930b # v2, pinned to SHA for security reasons
        with:
          files: "/var/lib/systemd/coredump/*"
      - name: Upload discovery core dumps
        if: steps.check_coredumps.outputs.files_exists == 'true'
        uses: actions/upload-artifact@v3
        with:
          name: ceph_nvmeof_discovery_cores-${{ github.run_number }}
          path: /var/lib/systemd/coredump/*
      - name: Display logs
        if: success() || failure()
        run: make logs OPTS=''
      - name: Tear down
        if: success() || failure()
        run: |
          make down
          make clean
  push-to-registry:
    if: github.event_name == 'release' && startsWith(github.ref, 'refs/tags/v')
    needs: [pytest, demo, discovery]
    runs-on: ubuntu-latest
    steps:
      - name: Download container images
        uses: actions/download-artifact@v3
        with:
          name: ceph_nvmeof_container_images-${{ github.run_number }}
      - name: Load container images
        run: |
          docker load < nvmeof.tar
          docker load < nvmeof-cli.tar
      - name: Login to quay.io
        uses: docker/login-action@v2
        with:
          registry: ${{ vars.CONTAINER_REGISTRY }}
          username: '${{ vars.CONTAINER_REGISTRY_USERNAME }}'
          password: '${{ secrets.CONTAINER_REGISTRY_PASSWORD }}'
      - name: Publish nvmeof containers when release/tag is created
        run: |
          make push
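      # `make push` presumably tags and pushes the nvmeof and nvmeof-cli images to the
      # registry configured above (quay.io, via the CONTAINER_REGISTRY variables).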