diff --git a/ocs_ci/ocs/bucket_utils.py b/ocs_ci/ocs/bucket_utils.py
index 989fdc57163..a5c478c227b 100644
--- a/ocs_ci/ocs/bucket_utils.py
+++ b/ocs_ci/ocs/bucket_utils.py
@@ -15,7 +15,7 @@
 from ocs_ci.ocs import constants
 from ocs_ci.ocs.exceptions import TimeoutExpiredError, UnexpectedBehaviour
 from ocs_ci.ocs.ocp import OCP
-from ocs_ci.utility import templating, version
+from ocs_ci.utility import templating
 from ocs_ci.utility.ssl_certs import get_root_ca_cert
 from ocs_ci.utility.utils import TimeoutSampler, run_cmd
 from ocs_ci.helpers.helpers import create_resource
@@ -724,12 +724,54 @@ def cli_create_ibmcos_backingstore(
     )
 
 
-def oc_create_s3comp_backingstore(cld_mgr, backingstore_name, uls_name, region):
-    pass
+def oc_create_rgw_backingstore(cld_mgr, backingstore_name, uls_name, region):
+    """
+    Create a new backingstore with RGW underlying storage using an oc create command
+
+    Args:
+        cld_mgr (CloudManager): holds the secret for backingstore creation
+        backingstore_name (str): backingstore name
+        uls_name (str): underlying storage name
+        region (str): which region to create the backingstore in (should be the same as the uls)
+
+    """
+    bs_data = templating.load_yaml(constants.MCG_BACKINGSTORE_YAML)
+    bs_data["metadata"]["name"] = backingstore_name
+    bs_data["metadata"]["namespace"] = config.ENV_DATA["cluster_namespace"]
+    bs_data["spec"] = {
+        "type": "s3-compatible",
+        "s3Compatible": {
+            "targetBucket": uls_name,
+            "endpoint": cld_mgr.rgw_client.endpoint,
+            "signatureVersion": "v2",
+            "secret": {
+                "name": cld_mgr.rgw_client.secret.name,
+                "namespace": bs_data["metadata"]["namespace"],
+            },
+        },
+    }
+    create_resource(**bs_data)
 
 
-def cli_create_s3comp_backingstore(cld_mgr, backingstore_name, uls_name, region):
-    pass
+def cli_create_rgw_backingstore(mcg_obj, cld_mgr, backingstore_name, uls_name, region):
+    """
+    Create a new backingstore with RGW underlying storage using a NooBaa CLI command
+
+    Args:
+        mcg_obj (MCG): MCG object to run the NooBaa CLI command with
+        cld_mgr (CloudManager): holds the secret for backingstore creation
+        backingstore_name (str): backingstore name
+        uls_name (str): underlying storage name
+        region (str): which region to create the backingstore in (should be the same as the uls)
+
+    """
+    mcg_obj.exec_mcg_cmd(
+        f"backingstore create s3-compatible {backingstore_name} "
+        f"--endpoint {cld_mgr.rgw_client.endpoint} "
+        f"--access-key {cld_mgr.rgw_client.access_key} "
+        f"--secret-key {cld_mgr.rgw_client.secret_key} "
+        f"--target-bucket {uls_name}",
+        use_yes=True,
+    )
 
 
 def oc_create_pv_backingstore(backingstore_name, vol_num, size, storage_class):
@@ -1757,16 +1799,9 @@ def patch_replication_policy_to_bucket(bucket_name, rule_id, destination_bucket_
         rule_id (str): The ID of the replication rule
         destination_bucket_name (str): The name of the replication destination bucket
     """
-    if version.get_semantic_ocs_version_from_config() >= version.VERSION_4_12:
-        replication_policy = {
-            "rules": [
-                {"rule_id": rule_id, "destination_bucket": destination_bucket_name}
-            ]
-        }
-    else:
-        replication_policy = [
-            {"rule_id": rule_id, "destination_bucket": destination_bucket_name}
-        ]
+    replication_policy = {
+        "rules": [{"rule_id": rule_id, "destination_bucket": destination_bucket_name}]
+    }
     replication_policy_patch_dict = {
         "spec": {
             "additionalConfig": {"replicationPolicy": json.dumps(replication_policy)}
@@ -1802,6 +1837,31 @@ def update_replication_policy(bucket_name, replication_policy_dict):
     ).patch(params=json.dumps(replication_policy_patch_dict), format_type="merge")
 
 
+def patch_replication_policy_to_bucketclass(
+    bucketclass_name, rule_id, destination_bucket_name
+):
+    """
+    Patches a replication policy to a bucketclass
+
+    Args:
+        bucketclass_name (str): The name of the bucketclass to patch
+        rule_id (str): The ID of the replication rule
+        destination_bucket_name (str): The name of the replication destination bucket
+    """
+
+    replication_policy = {
+        "rules": [{"rule_id": rule_id, "destination_bucket": destination_bucket_name}]
+    }
+    replication_policy_patch_dict = {
+        "spec": {"replicationPolicy": json.dumps(replication_policy)}
+    }
+    OCP(
+        kind="bucketclass",
+        namespace=config.ENV_DATA["cluster_namespace"],
+        resource_name=bucketclass_name,
+    ).patch(params=json.dumps(replication_policy_patch_dict), format_type="merge")
+
+
 def random_object_round_trip_verification(
     io_pod,
     bucket_name,
@@ -2029,3 +2089,66 @@ def sample_if_objects_expired(mcg_obj, bucket_name, prefix="", timeout=600, slee
     assert sampler.wait_for_func_status(result=True), f"{message} are not expired"
 
     logger.info(f"{message} are expired")
+
+
+def delete_all_noobaa_buckets(mcg_obj, request):
+    """
+    Delete all the buckets in noobaa and restore first.bucket after the current test
+
+    Args:
+        mcg_obj: MCG object
+        request: pytest request object
+
+    """
+
+    logger.info("Listing all buckets in the cluster")
+    buckets = mcg_obj.s3_client.list_buckets()
+
+    logger.info("Deleting all buckets and their objects")
+    for bucket in buckets["Buckets"]:
+        logger.info(f"Deleting {bucket['Name']} and its objects")
+        s3_bucket = mcg_obj.s3_resource.Bucket(bucket["Name"])
+        s3_bucket.objects.all().delete()
+        s3_bucket.delete()
+
+    def finalizer():
+        bucket_names = [
+            bucket["Name"] for bucket in mcg_obj.s3_client.list_buckets()["Buckets"]
+        ]
+        if "first.bucket" not in bucket_names:
+            logger.info("Creating the default bucket: first.bucket")
+            mcg_obj.s3_client.create_bucket(Bucket="first.bucket")
+        else:
+            logger.info("Skipping creation of first.bucket as it already exists")
+
+    request.addfinalizer(finalizer)
+
+
+def get_nb_bucket_stores(mcg_obj, bucket_name):
+    """
+    Query the noobaa-db for the backingstores/namespacestores
+    that a given bucket is using for its data placement
+
+    Args:
+        mcg_obj: MCG object
+        bucket_name: name of the bucket
+
+    Returns:
+        list: list of backingstore/namespacestore names
+
+    """
+    stores = set()
+    bucket_data = bucket_read_api(mcg_obj, bucket_name)
+
+    # Namespacestore bucket
+    if "namespace" in bucket_data:
+        read_srcs_list = [
+            d["resource"] for d in bucket_data["namespace"]["read_resources"]
+        ]
+        write_src = bucket_data["namespace"]["write_resource"]["resource"]
+        stores.update(read_srcs_list + [write_src])
+
+    # Data bucket
+    else:
+        tiers = [d["tier"] for d in bucket_data["tiering"]["tiers"]]
+        for tier in tiers:
+            tier_data = mcg_obj.send_rpc_query("tier_api", "read_tier", {"name": tier})
+            stores.update(tier_data["reply"]["attached_pools"])
+
+    return list(stores)
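Reviewer note: a minimal sketch of how the new bucket_utils helpers are meant to be driven. `mcg_obj`, `cld_mgr`, and `uls_name` are assumed to come from the usual ocs-ci fixtures (`mcg_obj`, `cld_mgr`, `cloud_uls_factory`); the backingstore and bucket names here are hypothetical.

```python
from ocs_ci.ocs.bucket_utils import (
    cli_create_rgw_backingstore,
    get_nb_bucket_stores,
    oc_create_rgw_backingstore,
)


def rgw_backingstore_sketch(mcg_obj, cld_mgr, uls_name):
    # Declarative path: renders MCG_BACKINGSTORE_YAML with an "s3-compatible"
    # spec pointing at the RGW endpoint, then applies it via create_resource.
    oc_create_rgw_backingstore(cld_mgr, "rgw-bs-oc", uls_name, region=None)

    # CLI path: runs "noobaa backingstore create s3-compatible ..." with the
    # RGW credentials taken from cld_mgr.rgw_client.
    cli_create_rgw_backingstore(mcg_obj, cld_mgr, "rgw-bs-cli", uls_name, region=None)

    # Placement query: the backingstores (or namespacestores) that a given
    # bucket's data is spread across.
    return get_nb_bucket_stores(mcg_obj, "some-bucket")
```

In practice both creation paths are reached through `backingstore_factory` via the new `"rgw"` key rather than called directly.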
diff --git a/ocs_ci/ocs/constants.py b/ocs_ci/ocs/constants.py
index c80df013df0..03807568dfe 100644
--- a/ocs_ci/ocs/constants.py
+++ b/ocs_ci/ocs/constants.py
@@ -295,6 +295,8 @@
 DEFAULT_STORAGECLASS_RBD_THICK = f"{DEFAULT_CLUSTERNAME}-ceph-rbd-thick"
 DEFAULT_OCS_STORAGECLASS = "default-ocs-storage-class"
 
+THIN_CSI_STORAGECLASS = "thin-csi"
+
 # Independent mode default StorageClasses
 DEFAULT_EXTERNAL_MODE_STORAGECLASS_RGW = f"{DEFAULT_CLUSTERNAME_EXTERNAL_MODE}-ceph-rgw"
 
@@ -1886,6 +1888,18 @@
 BACKINGSTORE_TYPE_AZURE = "azure-blob"
 BACKINGSTORE_TYPE_S3_COMP = "s3-compatible"
 BACKINGSTORE_TYPE_GOOGLE = "google-cloud-storage"
+BACKINGSTORE_TYPE_PV_POOL = "pv-pool"
+BACKINGSTORE_TYPE_IBMCOS = "ibm-cos"
+
+BS_TYPE_TO_PLATFORM_NAME_MAPPING = {
+    BACKINGSTORE_TYPE_AWS: "aws",
+    BACKINGSTORE_TYPE_AZURE: "azure",
+    BACKINGSTORE_TYPE_GOOGLE: "gcp",
+    BACKINGSTORE_TYPE_PV_POOL: "pv",
+    BACKINGSTORE_TYPE_S3_COMP: "rgw",
+    BACKINGSTORE_TYPE_IBMCOS: "ibmcos",
+}
+
 # Squads assignment
 # Tests are assigned to Squads based on patterns matching test path.
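The new mapping lets test code translate a backingstore CR's `spec.type` into the platform key that `backingstore_factory` expects. A minimal illustration using the constants above:

```python
from ocs_ci.ocs import constants

# An RGW-backed backingstore reports the CRD type "s3-compatible", which the
# oc/cli creator dictionaries in backingstore.py key as "rgw".
platform = constants.BS_TYPE_TO_PLATFORM_NAME_MAPPING[
    constants.BACKINGSTORE_TYPE_S3_COMP
]
assert platform == "rgw"
```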
diff --git a/ocs_ci/ocs/resources/backingstore.py b/ocs_ci/ocs/resources/backingstore.py
index 81d2e813f0a..74e691775e4 100644
--- a/ocs_ci/ocs/resources/backingstore.py
+++ b/ocs_ci/ocs/resources/backingstore.py
@@ -8,11 +8,13 @@
     oc_create_azure_backingstore,
     oc_create_pv_backingstore,
     oc_create_ibmcos_backingstore,
+    oc_create_rgw_backingstore,
     cli_create_google_backingstore,
     cli_create_azure_backingstore,
     cli_create_pv_backingstore,
     cli_create_ibmcos_backingstore,
     cli_create_aws_backingstore,
+    cli_create_rgw_backingstore,
 )
 from ocs_ci.ocs.exceptions import (
     TimeoutExpiredError,
@@ -265,6 +267,7 @@ def backingstore_factory(request, cld_mgr, mcg_obj, cloud_uls_factory):
             "gcp": oc_create_google_backingstore,
             "azure": oc_create_azure_backingstore,
             "ibmcos": oc_create_ibmcos_backingstore,
+            "rgw": oc_create_rgw_backingstore,
             "pv": oc_create_pv_backingstore,
         },
         "cli": {
@@ -272,6 +275,7 @@ def backingstore_factory(request, cld_mgr, mcg_obj, cloud_uls_factory):
             "gcp": cli_create_google_backingstore,
             "azure": cli_create_azure_backingstore,
             "ibmcos": cli_create_ibmcos_backingstore,
+            "rgw": cli_create_rgw_backingstore,
             "pv": cli_create_pv_backingstore,
         },
     }
diff --git a/ocs_ci/ocs/resources/mcg.py b/ocs_ci/ocs/resources/mcg.py
index ce57c42e8ce..a9f16fff2ba 100644
--- a/ocs_ci/ocs/resources/mcg.py
+++ b/ocs_ci/ocs/resources/mcg.py
@@ -1108,3 +1108,42 @@ def reset_admin_pw(self, new_password):
         logger.info("Waiting a bit for the change to propogate through the system...")
 
         sleep(15)
+
+    def get_admin_default_resource_name(self):
+        """
+        Get the default resource name of the admin account
+
+        Returns:
+            str: The default resource name
+
+        """
+
+        read_account_output = self.send_rpc_query(
+            "account_api",
+            "read_account",
+            params={
+                "email": self.noobaa_user,
+            },
+        )
+        return read_account_output.json()["reply"]["default_resource"]
+
+    def get_default_bc_backingstore_name(self):
+        """
+        Get the default backingstore name of the default bucketclass
+
+        Returns:
+            str: The default backingstore name
+
+        """
+        bucketclass_ocp_obj = OCP(
+            kind=constants.BUCKETCLASS,
+            namespace=config.ENV_DATA["cluster_namespace"],
+            resource_name=constants.DEFAULT_NOOBAA_BUCKETCLASS,
+        )
+        return (
+            bucketclass_ocp_obj.get()
+            .get("spec")
+            .get("placementPolicy")
+            .get("tiers")[0]
+            .get("backingStores")[0]
+        )
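A sketch of how the two new `MCG` getters pair up in the upgrade tests added below; both are expected to name the same backingstore once the override has been applied. `mcg_obj` is assumed to be an `MCG` instance from the usual fixture:

```python
# The admin account's default resource, read via the NooBaa RPC API.
admin_default = mcg_obj.get_admin_default_resource_name()

# The first backingstore of the default bucketclass's first placement tier.
bucketclass_default = mcg_obj.get_default_bc_backingstore_name()

# After override_default_backingstore() both should point at the alternative
# backingstore rather than the stock default.
assert admin_default == bucketclass_default
```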
diff --git a/tests/conftest.py b/tests/conftest.py
index e2a7ef812bd..b638da41847 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -7054,3 +7054,157 @@ def reset_conn_score():
     from ocs_ci.ocs.resources.stretchcluster import StretchCluster
 
     return StretchCluster().reset_conn_score()
+
+
+@pytest.fixture(scope="session")
+def allow_default_backingstore_override(request):
+    """
+    Modify the noobaa CR to allow overriding the default backingstore
+
+    """
+
+    nb_ocp_obj = OCP(
+        kind="noobaa",
+        namespace=ocsci_config.ENV_DATA["cluster_namespace"],
+        resource_name="noobaa",
+    )
+
+    def patch_allow_manual_default_backingstore():
+        """
+        Patch "manualDefaultBackingStore: true" to the noobaa CR
+
+        """
+        add_op = [
+            {"op": "add", "path": "/spec/manualDefaultBackingStore", "value": True}
+        ]
+        nb_ocp_obj.patch(
+            resource_name=constants.NOOBAA_RESOURCE_NAME,
+            params=json.dumps(add_op),
+            format_type="json",
+        )
+
+    def finalizer():
+        """
+        Remove "manualDefaultBackingStore: true" from the noobaa CR
+
+        """
+        remove_op = [
+            {
+                "op": "remove",
+                "path": "/spec/manualDefaultBackingStore",
+            }
+        ]
+        nb_ocp_obj.patch(
+            resource_name=constants.NOOBAA_RESOURCE_NAME,
+            params=json.dumps(remove_op),
+            format_type="json",
+        )
+
+    request.addfinalizer(finalizer)
+    patch_allow_manual_default_backingstore()
+
+
+@pytest.fixture(scope="session")
+def override_default_backingstore_session(
+    request,
+    mcg_obj_session,
+    backingstore_factory_session,
+    allow_default_backingstore_override,
+):
+    return override_default_backingstore_fixture(
+        request, mcg_obj_session, backingstore_factory_session
+    )
+
+
+@pytest.fixture(scope="function")
+def override_default_backingstore(
+    request, mcg_obj_session, backingstore_factory, allow_default_backingstore_override
+):
+    return override_default_backingstore_fixture(
+        request, mcg_obj_session, backingstore_factory
+    )
+
+
+def override_default_backingstore_fixture(
+    request, mcg_obj_session, backingstore_factory
+):
+    """
+    Returns a function that overrides the default backingstore with an alternative
+    of the same type.
+
+    """
+
+    bucketclass_ocp_obj = OCP(
+        kind=constants.BUCKETCLASS,
+        namespace=ocsci_config.ENV_DATA["cluster_namespace"],
+        resource_name=constants.DEFAULT_NOOBAA_BUCKETCLASS,
+    )
+
+    def _override_nb_default_backingstore_implementation(alt_backingstore_name=None):
+        """
+        1. If the name of an alternative backingstore is not provided,
+        create a new backingstore of the same type as the current default
+        2. Update the admin account's default resource
+        3. Patch the default bucketclass to use the new default backingstore
+
+        Args:
+            alt_backingstore_name (str, optional): The name of an alternative backingstore
+
+        """
+
+        # 1. If the name of an alternative backingstore is not provided,
+        # create a new backingstore of the same type as the current default
+        if alt_backingstore_name is None:
+            original_bs_type = OCP(
+                kind="backingstore",
+                namespace=ocsci_config.ENV_DATA["cluster_namespace"],
+                resource_name=constants.DEFAULT_NOOBAA_BACKINGSTORE,
+            ).data["spec"]["type"]
+            original_bs_platform_name = constants.BS_TYPE_TO_PLATFORM_NAME_MAPPING[
+                original_bs_type
+            ]
+            if original_bs_platform_name != "pv":
+                alt_bs_dict = {original_bs_platform_name: [(1, None)]}
+            elif ocsci_config.ENV_DATA["mcg_only_deployment"]:
+                alt_bs_dict = {"pv": [(1, 20, constants.THIN_CSI_STORAGECLASS)]}
+            else:
+                alt_bs_dict = {"pv": [(1, 20, constants.DEFAULT_STORAGECLASS_RBD)]}
+            alt_backingstore_name = backingstore_factory("oc", alt_bs_dict)[0].name
+
+        # 2. Update the admin account's default resource
+        mcg_obj_session.exec_mcg_cmd(
+            "".join(
+                (
+                    f"account update {mcg_obj_session.noobaa_user} ",
+                    f"--new_default_resource={alt_backingstore_name}",
+                )
+            )
+        )
+
+        # 3. Patch the default bucketclass to use the new default backingstore
+        update_op = [
+            {
+                "op": "replace",
+                "path": "/spec/placementPolicy/tiers/0/backingStores/0",
+                "value": alt_backingstore_name,
+            }
+        ]
+        bucketclass_ocp_obj.patch(
+            resource_name=constants.DEFAULT_NOOBAA_BUCKETCLASS,
+            params=json.dumps(update_op),
+            format_type="json",
+        )
+
+        return alt_backingstore_name
+
+    def finalizer():
+        """
+        Change the default backingstore back to the original
+
+        """
+        _override_nb_default_backingstore_implementation(
+            constants.DEFAULT_NOOBAA_BACKINGSTORE
+        )
+
+    request.addfinalizer(finalizer)
+    return _override_nb_default_backingstore_implementation
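A sketch of how a test is expected to consume the function-scoped fixture; the name asserted against is assumed to be the value of `constants.DEFAULT_NOOBAA_BACKINGSTORE`:

```python
# Hypothetical test -- override_default_backingstore is the fixture above.
def test_override_sketch(override_default_backingstore):
    # With no argument, a fresh backingstore of the same type as the current
    # default is created and promoted; the fixture's finalizer reverts to the
    # original default backingstore when the test ends.
    alt_bs_name = override_default_backingstore()
    assert alt_bs_name != "noobaa-default-backing-store"
```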
diff --git a/tests/functional/object/mcg/test_bucket_deletion.py b/tests/functional/object/mcg/test_bucket_deletion.py
index 2ee6925dc4a..ef3b769e533 100644
--- a/tests/functional/object/mcg/test_bucket_deletion.py
+++ b/tests/functional/object/mcg/test_bucket_deletion.py
@@ -17,6 +17,7 @@
 from ocs_ci.framework.testlib import MCGTest
 from ocs_ci.helpers.helpers import create_unique_resource_name
 from ocs_ci.ocs.bucket_utils import (
+    delete_all_noobaa_buckets,
     sync_object_directory,
     rm_object_recursive,
 )
@@ -202,40 +203,17 @@ def test_s3_bucket_delete_1t_objects(self, mcg_obj, awscli_pod_session):
             rm_object_recursive(awscli_pod_session, bucketname, mcg_obj)
             mcg_obj.s3_resource.Bucket(bucketname).delete()
 
-    @pytest.fixture(scope="function")
-    def default_bucket_teardown(self, request, mcg_obj):
-        """
-        Recreates first.bucket
-        """
-
-        def finalizer():
-            if "first.bucket" not in mcg_obj.s3_client.list_buckets()["Buckets"]:
-                logger.info("Creating the default bucket: first.bucket")
-                mcg_obj.s3_client.create_bucket(Bucket="first.bucket")
-            else:
-                logger.info("Skipping creation of first.bucket as it already exists")
-
-        request.addfinalizer(finalizer)
-
     @tier3
     @skipif_managed_service
     @bugzilla("1980299")
     @pytest.mark.polarion_id("OCS-2704")
     @skipif_ocs_version("<4.9")
-    def test_delete_all_buckets(self, mcg_obj, bucket_factory, default_bucket_teardown):
+    def test_delete_all_buckets(self, request, mcg_obj, bucket_factory):
         """
         Test with deletion of all buckets including the default first.bucket.
         """
-        logger.info("Listing all buckets in the cluster")
-        buckets = mcg_obj.s3_client.list_buckets()
-
-        logger.info("Deleting all buckets and its objects")
-        for bucket in buckets["Buckets"]:
-            logger.info(f"Deleting {bucket} and its objects")
-            s3_bucket = mcg_obj.s3_resource.Bucket(bucket["Name"])
-            s3_bucket.objects.all().delete()
-            s3_bucket.delete()
+        delete_all_noobaa_buckets(mcg_obj, request)
 
         logger.info("Verifying no bucket exists")
         assert not mcg_obj.s3_get_all_bucket_names(), "Failed: Buckets exists"
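One behavioural note on this refactor, as a sketch: `delete_all_noobaa_buckets` registers its first.bucket-restoring finalizer on `request` at call time, so the default bucket is recreated even if the assertions that follow fail. Hypothetical standalone usage:

```python
from ocs_ci.ocs.bucket_utils import delete_all_noobaa_buckets


def test_delete_all_buckets_sketch(request, mcg_obj):
    # The finalizer is registered before the assertions below run.
    delete_all_noobaa_buckets(mcg_obj, request)
    assert not mcg_obj.s3_get_all_bucket_names()
```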
""" - logger.info("Listing all buckets in the cluster") - buckets = mcg_obj.s3_client.list_buckets() - - logger.info("Deleting all buckets and its objects") - for bucket in buckets["Buckets"]: - logger.info(f"Deleting {bucket} and its objects") - s3_bucket = mcg_obj.s3_resource.Bucket(bucket["Name"]) - s3_bucket.objects.all().delete() - s3_bucket.delete() + delete_all_noobaa_buckets(mcg_obj, request) logger.info("Verifying no bucket exists") assert not mcg_obj.s3_get_all_bucket_names(), "Failed: Buckets exists" diff --git a/tests/functional/object/mcg/test_default_backingstore_override.py b/tests/functional/object/mcg/test_default_backingstore_override.py new file mode 100644 index 00000000000..dae3bfbaad9 --- /dev/null +++ b/tests/functional/object/mcg/test_default_backingstore_override.py @@ -0,0 +1,195 @@ +import json +import logging +from uuid import uuid4 + +import pytest + +from ocs_ci.framework import config +from ocs_ci.framework.pytest_customization.marks import ( + red_squad, + polarion_id, + bugzilla, + tier1, + tier2, + pre_upgrade, + post_upgrade, + skipif_aws_creds_are_missing, + ignore_leftovers, + mcg, +) +from ocs_ci.framework.testlib import MCGTest +from ocs_ci.ocs import constants +from ocs_ci.ocs.bucket_utils import ( + get_nb_bucket_stores, + write_random_test_objects_to_bucket, + compare_bucket_object_list, + patch_replication_policy_to_bucketclass, +) +from ocs_ci.ocs.ocp import OCP + +logger = logging.getLogger(__name__) + + +@mcg +@red_squad +@ignore_leftovers # needed for the upgrade TCs +class TestDefaultBackingstoreOverride(MCGTest): + """ + Test overriding the default noobaa backingstore + + """ + + @tier1 + @polarion_id("OCS-5193") + def test_default_buckets_backingstore( + self, + mcg_obj_session, + override_default_backingstore, + bucket_factory, + ): + """ + 1. Override the default noobaa backingstore + 2. Create a new bucket using the mcg-cli with the default config + 3. Create a new OBC using oc and yamls without specifying the bucketclass + 4. Verify the buckets' backingstore is the new default backingstore + + """ + + # 1. Override the default noobaa backingstore + alt_default_bs_name = override_default_backingstore() + + # 2. Create a new bucket using the mcg-cli with the default backingstore + default_cli_bucket = bucket_factory(amount=1, interface="cli")[0] + + # 3. Create a new OBC using oc and yamls without specifying the bucketclass + default_obc_bucket = bucket_factory(amount=1, interface="oc")[0] + + # 4. Verify the bucket's backingstore is the new default backingstore + assert ( + get_nb_bucket_stores(mcg_obj_session, default_cli_bucket.name)[0] + == alt_default_bs_name + ), "The default mcg-cli bucket does not use the new default backingstore!" + assert ( + get_nb_bucket_stores(mcg_obj_session, default_obc_bucket.name)[0] + == alt_default_bs_name + ), "The default OC bucket does not use the new default backingstore!" + + @pre_upgrade + def test_default_backingstore_override_pre_upgrade( + self, + request, + mcg_obj_session, + override_default_backingstore_session, + ): + """ + 1. Override the current default using the new backingstore of the same type + 2. Verify the new default is set before the upgrade + + """ + # 1. Override the current default using the new backingstore of the same type + alt_default_bs_name = override_default_backingstore_session() + # Cache the new default backingstore name to pass to the post-upgrade test + request.config.cache.set("pre_upgrade_alt_bs_name", alt_default_bs_name) + + # 2. 
+
+    @pytest.fixture()
+    def nb_default_bc_cleanup_fixture(self, request):
+        """
+        Clear all replication policies from the default noobaa bucketclass
+
+        """
+
+        def clear_replication_policies_from_nb_default_bucketclass():
+            replication_policy_patch_dict = {"spec": {"replicationPolicy": None}}
+
+            OCP(
+                kind="bucketclass",
+                namespace=config.ENV_DATA["cluster_namespace"],
+                resource_name=constants.DEFAULT_NOOBAA_BUCKETCLASS,
+            ).patch(
+                params=json.dumps(replication_policy_patch_dict), format_type="merge"
+            )
+
+        request.addfinalizer(clear_replication_policies_from_nb_default_bucketclass)
+
+    @tier2
+    @skipif_aws_creds_are_missing
+    @polarion_id("OCS-5195")
+    @bugzilla("2237427")
+    def test_bucketclass_replication_after_default_backingstore_override(
+        self,
+        mcg_obj_session,
+        bucket_factory,
+        override_default_backingstore,
+        awscli_pod_session,
+        test_directory_setup,
+        nb_default_bc_cleanup_fixture,
+    ):
+        """
+        1. Create a target bucket
+        2. Set a bucketclass replication policy to the target bucket on the default bucketclass
+        3. Override the default noobaa backingstore
+        4. Create a source OBC under the default bucketclass
+        5. Upload objects to the source bucket and verify they are replicated to the target bucket
+
+        """
+        # 1. Create a target bucket
+        target_bucketclass_dict = {
+            "interface": "OC",
+            "backingstore_dict": {"aws": [(1, None)]},
+        }
+        target_bucket = bucket_factory(bucketclass=target_bucketclass_dict)[0]
+
+        # 2. Set a bucketclass replication policy to the target bucket on the default bucketclass
+        patch_replication_policy_to_bucketclass(
+            bucketclass_name=constants.DEFAULT_NOOBAA_BUCKETCLASS,
+            rule_id=uuid4().hex,
+            destination_bucket_name=target_bucket.name,
+        )
+
+        # 3. Override the default noobaa backingstore
+        override_default_backingstore()
+
+        # 4. Create a source OBC under the default bucketclass
+        source_bucket = bucket_factory(interface="OC")[0]
+
+        # 5. Upload objects to the source bucket and verify they are replicated to the target bucket
+        write_random_test_objects_to_bucket(
+            awscli_pod_session,
+            source_bucket.name,
+            test_directory_setup.origin_dir,
+            amount=5,
+            mcg_obj=mcg_obj_session,
+        )
+        assert compare_bucket_object_list(
+            mcg_obj_session,
+            source_bucket.name,
+            target_bucket.name,
+        ), f"Objects in {source_bucket.name} and {target_bucket.name} don't match"
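For reference, the merge patch that step 2 produces via `patch_replication_policy_to_bucketclass`, spelled out with hypothetical rule and bucket names. Note that the policy itself is JSON-encoded as a string inside the patch:

```python
import json

policy = {"rules": [{"rule_id": "rule-1", "destination_bucket": "target-bucket"}]}
patch = {"spec": {"replicationPolicy": json.dumps(policy)}}

# Yields: {"spec": {"replicationPolicy": "{\"rules\": [...]}"}}
print(json.dumps(patch))
```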
diff --git a/tests/functional/object/mcg/test_write_to_bucket.py b/tests/functional/object/mcg/test_write_to_bucket.py
index 1222dfc766f..8ab316ce654 100644
--- a/tests/functional/object/mcg/test_write_to_bucket.py
+++ b/tests/functional/object/mcg/test_write_to_bucket.py
@@ -31,6 +31,7 @@
     skipif_managed_service,
     bugzilla,
     skipif_ocs_version,
+    on_prem_platform_required,
 )
 from ocs_ci.ocs.constants import AWSCLI_TEST_OBJ_DIR
 from uuid import uuid4
@@ -132,6 +133,20 @@ class TestBucketIO(MCGTest):
             ],
             marks=[tier1],
         ),
+        pytest.param(
+            *[
+                "OC",
+                {"interface": "OC", "backingstore_dict": {"rgw": [(1, None)]}},
+            ],
+            marks=[tier1, on_prem_platform_required],
+        ),
+        pytest.param(
+            *[
+                "CLI",
+                {"interface": "CLI", "backingstore_dict": {"rgw": [(1, None)]}},
+            ],
+            marks=[tier1, on_prem_platform_required],
+        ),
     ],
     ids=[
         "DEFAULT-BACKINGSTORE",
@@ -140,6 +155,8 @@ class TestBucketIO(MCGTest):
         "GCP-OC-1",
         "IBMCOS-OC-1",
         "IBMCOS-CLI-1",
+        "RGW-OC-1",
+        "RGW-CLI-1",
     ],
 )
 @flaky
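The new parametrizations hand `bucket_factory` a bucketclass dict of the following shape; on platforms without an on-prem RGW, the `on_prem_platform_required` mark skips them. A hypothetical direct use:

```python
# Sketch: one OC-interfaced bucket backed by a single RGW backingstore.
rgw_bucketclass_dict = {"interface": "OC", "backingstore_dict": {"rgw": [(1, None)]}}
bucket = bucket_factory(amount=1, interface="OC", bucketclass=rgw_bucketclass_dict)[0]
```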