diff --git a/rgw/v2/tests/s3_swift/configs/test_user_with_placement_id_storage_class.yaml b/rgw/v2/tests/s3_swift/configs/test_user_with_placement_id_storage_class.yaml new file mode 100644 index 000000000..c87d3d06e --- /dev/null +++ b/rgw/v2/tests/s3_swift/configs/test_user_with_placement_id_storage_class.yaml @@ -0,0 +1,10 @@ +#ceph-qe-scripts/rgw/v2/tests/s3_swift/user_create.py +# Polarion: CEPH-83575880 +# BZ: https://bugzilla.redhat.com/show_bug.cgi?id=2293615 +config: + user_count: 1 + user_type: non-tenanted + test_ops: + create_bucket: false + swift_user: false + user_with_default_placement_and_storageclass: true diff --git a/rgw/v2/tests/s3_swift/configs/test_user_with_placement_id_storage_class_cold.yaml b/rgw/v2/tests/s3_swift/configs/test_user_with_placement_id_storage_class_cold.yaml new file mode 100644 index 000000000..c082adcc1 --- /dev/null +++ b/rgw/v2/tests/s3_swift/configs/test_user_with_placement_id_storage_class_cold.yaml @@ -0,0 +1,11 @@ +#ceph-qe-scripts/rgw/v2/tests/s3_swift/user_create.py +# Polarion: CEPH-83575880 +# BZ: https://bugzilla.redhat.com/show_bug.cgi?id=2293615 +config: + user_count: 1 + user_type: non-tenanted + test_ops: + create_bucket: false + swift_user: false + user_with_default_placement_and_storageclass: true + storage_class: "cold" diff --git a/rgw/v2/tests/s3_swift/reusable.py b/rgw/v2/tests/s3_swift/reusable.py index 1936a0bcf..588a95262 100644 --- a/rgw/v2/tests/s3_swift/reusable.py +++ b/rgw/v2/tests/s3_swift/reusable.py @@ -2611,3 +2611,31 @@ def create_storage_class_in_all_zones(current_zone, rgw_ssh_con, config): f"ceph osd pool application enable {pool_name} rgw" ) rgw_ssh_con.exec_command("radosgw-admin period update --commit") + + +def validate_default_placement_and_storageclass_for_user( + uid, placement_id, storage_class +): + """ + This function is to validate default_placement and storageclass set to user + uid: uid of the user + placement_id: placement_id set to the user + storage_class: storage_class 
set to the user + """ + out = json.loads(utils.exec_shell_cmd(f"radosgw-admin user info --uid={uid}")) + if out["default_placement"] != str(placement_id): + raise AssertionError(f"default Placement set for user: {uid} is failed") + if out["default_storage_class"] != str(storage_class): + raise AssertionError(f"default storage class set for user: {uid} is failed") + + +def get_placement_and_storageclass_from_cluster(): + """ + This function is to fetch placement_id and storage_class from the cluster. + """ + cmd = "radosgw-admin zone get" + out = json.loads(utils.exec_shell_cmd(cmd)) + placement_id = out["placement_pools"][0]["key"] + storage_classes = out["placement_pools"][0]["val"]["storage_classes"] + storage_class_list = list(storage_classes.keys()) + return placement_id, storage_class_list diff --git a/rgw/v2/tests/s3_swift/user_create.py b/rgw/v2/tests/s3_swift/user_create.py index 47b8fab0f..4f8b8427a 100644 --- a/rgw/v2/tests/s3_swift/user_create.py +++ b/rgw/v2/tests/s3_swift/user_create.py @@ -1,3 +1,10 @@ +""" + +configs/test_user_with_placement_id_storage_class.yaml +configs/test_user_with_placement_id_storage_class_cold.yaml +configs/test_user_modify_with_placementid.yaml +""" + import os import sys @@ -91,6 +98,20 @@ def test_exec(config, ssh_con): if "*** Caught signal (Aborted) **" in out: raise AssertionError("user modify with placementid caused crash!!") + if config.test_ops.get("user_with_default_placement_and_storageclass", False): + ( + placement_id, + storage_class_list, + ) = reusable.get_placement_and_storageclass_from_cluster() + user_name = "Testuser_defaultstorage" + storage_class = config.test_ops.get("storage_class", storage_class_list[0]) + placement_id = config.test_ops.get("placement_id", placement_id) + cmd = f"radosgw-admin user create --uid={user_name} --display-name {user_name} --storage-class {storage_class} --placement-id {placement_id}" + out = utils.exec_shell_cmd(cmd) + 
reusable.validate_default_placement_and_storageclass_for_user( + user_name, placement_id, storage_class + ) + is_multisite = utils.is_cluster_multisite() if is_multisite: log.info("Cluster is multisite")