diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 73d85b24171..60b0dba5b19 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -119,7 +119,8 @@ jobs:
     env:
       CGO_LDFLAGS_ALLOW: "(-Wl,-wrap,pthread_create)|(-Wl,-z,now)"
       INCUS_CEPH_CLUSTER: "ceph"
-      INCUS_CEPH_CEPHFS: "cephfs"
+      INCUS_CEPH_CEPHFS: "cephfs-incus"
+      INCUS_CEPH_CLIENT: "incus"
       INCUS_CEPH_CEPHOBJECT_RADOSGW: "http://127.0.0.1"
       INCUS_CONCURRENT: "1"
       INCUS_VERBOSE: "1"
@@ -335,8 +336,7 @@ jobs:
           sudo microceph enable rgw
           sudo microceph.ceph osd pool create cephfs_meta 32
           sudo microceph.ceph osd pool create cephfs_data 32
-          sudo microceph.ceph fs new cephfs cephfs_meta cephfs_data
-          sudo microceph.ceph fs ls
+          sudo microceph.ceph fs new ${INCUS_CEPH_CEPHFS} cephfs_meta cephfs_data
           sleep 30
           sudo microceph.ceph status
           # Wait until there are no more "unkowns" pgs
@@ -389,7 +389,7 @@ jobs:
 
       - name: Create build directory
         run: |
-          mkdir bin
+          mkdir bin
 
       - name: Build static x86_64 incus
         env:
diff --git a/test/README.md b/test/README.md
index 95411fe3456..5cf2cc93003 100644
--- a/test/README.md
+++ b/test/README.md
@@ -16,6 +16,7 @@ Name | Default | Description
 `INCUS_CEPH_CLUSTER` | ceph | The name of the ceph cluster to create osd pools in
 `INCUS_CEPH_CEPHFS` | "" | Enables the CephFS tests using the specified cephfs filesystem for `cephfs` pools
 `INCUS_CEPH_CEPHOBJECT_RADOSGW` | "" | Enables the Ceph Object tests using the specified radosgw HTTP endpoint for `cephobject` pools
+`INCUS_CEPH_CLIENT` | "admin" | User to use when authenticating to the Ceph storage cluster
 `INCUS_CONCURRENT` | 0 | Run concurrency tests, very CPU intensive
 `INCUS_DEBUG` | 0 | Run incusd, incus and the shell in debug mode (very verbose)
 `INCUS_INSPECT` | 0 | Don't teardown the test environment on failure
diff --git a/test/main.sh b/test/main.sh
index decad43790b..60a2da61fd4 100755
--- a/test/main.sh
+++ b/test/main.sh
@@ -63,6 +63,13 @@ if [ "$INCUS_BACKEND" != "random" ] && ! storage_backend_available "$INCUS_BACKE
 fi
 echo "==> Using storage backend ${INCUS_BACKEND}"
 
+if [ "${INCUS_BACKEND}" = "ceph" ]; then
+    if [ -z "${INCUS_CEPH_CLIENT:-}" ]; then
+        INCUS_CEPH_CLIENT="admin"
+    fi
+    echo "==> Using ceph client ${INCUS_CEPH_CLIENT}"
+fi
+
 import_storage_backends
 
 cleanup() {
diff --git a/test/suites/container_devices_disk.sh b/test/suites/container_devices_disk.sh
index fab44538bc1..52b27b772fa 100644
--- a/test/suites/container_devices_disk.sh
+++ b/test/suites/container_devices_disk.sh
@@ -157,10 +157,21 @@ test_container_devices_disk_cephfs() {
     fi
 
     incus launch testimage ceph-fs -c security.privileged=true
-    incus config device add ceph-fs fs disk source=cephfs:"${INCUS_CEPH_CEPHFS}"/ ceph.user_name=admin ceph.cluster_name=ceph path=/cephfs
+    ceph fs authorize "${INCUS_CEPH_CEPHFS}" "client.${INCUS_CEPH_CLIENT}" / rw
+    incus config device add ceph-fs fs disk \
+        source=cephfs:"${INCUS_CEPH_CEPHFS}"/ \
+        ceph.user_name="${INCUS_CEPH_CLIENT}" \
+        ceph.cluster_name="${INCUS_CEPH_CLUSTER}" \
+        path=/cephfs
     incus exec ceph-fs -- stat /cephfs
     incus restart ceph-fs --force
     incus exec ceph-fs -- stat /cephfs
+    incus exec ceph-fs -- mkdir /cephfs/ro
+    incus stop ceph-fs
+    ceph fs authorize "${INCUS_CEPH_CEPHFS}" "client.${INCUS_CEPH_CLIENT}" / rw /ro ro
+    incus start ceph-fs
+    ! incus exec ceph-fs -- touch /cephfs/ro/fail || true
+    incus exec ceph-fs -- touch /cephfs/succeed
 
     incus delete -f ceph-fs
 }
diff --git a/test/suites/storage_driver_cephfs.sh b/test/suites/storage_driver_cephfs.sh
index a08f34fb144..39f85740f8d 100644
--- a/test/suites/storage_driver_cephfs.sh
+++ b/test/suites/storage_driver_cephfs.sh
@@ -7,22 +7,42 @@ test_storage_driver_cephfs() {
     if [ "$incus_backend" != "ceph" ] || [ -z "${INCUS_CEPH_CEPHFS:-}" ]; then
         return
     fi
 
+    if [ "${INCUS_CEPH_CLIENT}" != "admin" ]; then
+        ceph fs authorize "${INCUS_CEPH_CEPHFS}" "client.${INCUS_CEPH_CLIENT}" / rw
+    fi
+
     # Simple create/delete attempt
-    incus storage create cephfs cephfs source="${INCUS_CEPH_CEPHFS}/$(basename "${INCUS_DIR}")"
+    incus storage create cephfs cephfs \
+        source="${INCUS_CEPH_CEPHFS}/$(basename "${INCUS_DIR}")" \
+        cephfs.user.name="${INCUS_CEPH_CLIENT}"
     incus storage delete cephfs
 
     # Test invalid key combinations for auto-creation of cephfs entities.
-    ! incus storage create cephfs cephfs source="${INCUS_CEPH_CEPHFS}/$(basename "${INCUS_DIR}")" cephfs.osd_pg_num=32 || true
-    ! incus storage create cephfs cephfs source="${INCUS_CEPH_CEPHFS}/$(basename "${INCUS_DIR}")" cephfs.meta_pool=xyz || true
-    ! incus storage create cephfs cephfs source="${INCUS_CEPH_CEPHFS}/$(basename "${INCUS_DIR}")" cephfs.data_pool=xyz || true
-    ! incus storage create cephfs cephfs source="${INCUS_CEPH_CEPHFS}/$(basename "${INCUS_DIR}")" cephfs.create_missing=true cephfs.data_pool=xyz_data cephfs.meta_pool=xyz_meta || true
+    ! incus storage create cephfs cephfs \
+        source="${INCUS_CEPH_CEPHFS}/$(basename "${INCUS_DIR}")" \
+        cephfs.user.name="${INCUS_CEPH_CLIENT}" \
+        cephfs.osd_pg_num=32 || true
+    ! incus storage create cephfs cephfs \
+        source="${INCUS_CEPH_CEPHFS}/$(basename "${INCUS_DIR}")" \
+        cephfs.user.name="${INCUS_CEPH_CLIENT}" \
+        cephfs.meta_pool=xyz || true
+    ! incus storage create cephfs cephfs \
+        source="${INCUS_CEPH_CEPHFS}/$(basename "${INCUS_DIR}")" \
+        cephfs.user.name="${INCUS_CEPH_CLIENT}" \
+        cephfs.data_pool=xyz || true
+    ! incus storage create cephfs cephfs \
+        source="${INCUS_CEPH_CEPHFS}/$(basename "${INCUS_DIR}")" \
+        cephfs.user.name="${INCUS_CEPH_CLIENT}" \
+        cephfs.create_missing=true cephfs.data_pool=xyz_data cephfs.meta_pool=xyz_meta || true
 
     # Test cephfs storage volumes.
     for fs in "cephfs" "cephfs2" ; do
         if [ "${fs}" = "cephfs" ]; then
             # Create one cephfs with pre-existing OSDs.
-            incus storage create "${fs}" cephfs source="${INCUS_CEPH_CEPHFS}/$(basename "${INCUS_DIR}")"
+            incus storage create "${fs}" cephfs \
+                source="${INCUS_CEPH_CEPHFS}/$(basename "${INCUS_DIR}")" \
+                cephfs.user.name="${INCUS_CEPH_CLIENT}"
         else
             # Create one cephfs by creating the OSDs and the cephfs itself.
             incus storage create "${fs}" cephfs source=cephfs2 cephfs.create_missing=true cephfs.data_pool=xyz_data cephfs.meta_pool=xyz_meta
@@ -64,5 +84,5 @@ test_storage_driver_cephfs() {
     done
 
     # Recreate the fs for other tests.
-    ceph fs new cephfs cephfs_meta cephfs_data --force
+    ceph fs new "${INCUS_CEPH_CEPHFS}" cephfs_meta cephfs_data --force
 }
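
To exercise these changes outside of CI, something along these lines should work (a minimal sketch, assuming a reachable Ceph/MicroCeph cluster and main.sh's single-suite invocation; the filesystem and client names simply mirror the CI values above and are not required, and the suites themselves create the client on first use via ceph fs authorize):

    export INCUS_BACKEND=ceph
    export INCUS_CEPH_CLUSTER=ceph          # cluster to create OSD pools in
    export INCUS_CEPH_CEPHFS=cephfs-incus   # pre-created CephFS filesystem
    export INCUS_CEPH_CLIENT=incus          # omit to fall back to "admin"

    # -E preserves the exported variables across sudo.
    cd test
    sudo -E ./main.sh storage_driver_cephfs
    sudo -E ./main.sh container_devices_disk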