diff --git a/ocs_ci/ocs/cluster.py b/ocs_ci/ocs/cluster.py
index 441059f01a7..d4f979a268f 100644
--- a/ocs_ci/ocs/cluster.py
+++ b/ocs_ci/ocs/cluster.py
@@ -1368,7 +1368,6 @@ def parse_ceph_df_pools(raw_output: str) -> pd.DataFrame:
         "%USED",
         "MAX AVAIL",
         "QUOTA OBJECTS",
-        "QUOTA OBJECTS",
         "QUOTA BYTES",
         "DIRTY",
         "USED COMPR",
@@ -1415,6 +1414,7 @@ def validate_num_of_pgs(expected_pgs: dict[str, int]) -> bool:
 
     Returns:
         bool: True if all pools have the expected number of PGs, False otherwise.
     """
+    ceph_df_output = get_ceph_df_detail(format=None, out_yaml_format=False)
     pools_df = parse_ceph_df_pools(ceph_df_output)
     pools_dict = ceph_details_df_to_dict(pools_df)
diff --git a/tests/cross_functional/ui/test_create_pool_block_pool.py b/tests/cross_functional/ui/test_create_pool_block_pool.py
index 76ebfe40797..5f643356854 100644
--- a/tests/cross_functional/ui/test_create_pool_block_pool.py
+++ b/tests/cross_functional/ui/test_create_pool_block_pool.py
@@ -1,10 +1,13 @@
 import logging
 import pytest
+
+from ocs_ci.framework import config
 from ocs_ci.framework.pytest_customization.marks import (
     tier1,
     skipif_ui_not_support,
     skipif_hci_provider_or_client,
     green_squad,
+    bugzilla,
 )
 from ocs_ci.framework.testlib import skipif_ocs_version, ManageTest, ui
 from ocs_ci.ocs.exceptions import (
@@ -18,8 +21,12 @@
     validate_compression,
     validate_replica_data,
     check_pool_compression_replica_ceph_level,
+    validate_num_of_pgs,
 )
 from ocs_ci.ocs.ui.block_pool import BlockPoolUI
+from ocs_ci.ocs.resources.pod import get_ceph_tools_pod
+from ocs_ci.ocs.ocp import OCP
+
 
 logger = logging.getLogger(__name__)
 
@@ -34,6 +41,7 @@
         pytest.param(*[3, False], marks=pytest.mark.polarion_id("OCS-2588")),
         pytest.param(*[2, True], marks=pytest.mark.polarion_id("OCS-2587")),
         pytest.param(*[2, False], marks=pytest.mark.polarion_id("OCS-2586")),
+        pytest.param(*[2, False], marks=pytest.mark.polarion_id("OCS-6255")),
     ],
 )
 @skipif_hci_provider_or_client
@@ -75,7 +83,8 @@ def pod(self, pod_factory):
 
     @ui
     @tier1
-    @skipif_ocs_version("<4.8")
+    @bugzilla("2253013")
+    @skipif_ocs_version("<4.16")
     @green_squad
     def test_create_delete_pool(
         self,
@@ -95,6 +104,9 @@ def test_create_delete_pool(
         .* Create POD based on the PVC
         .* Run IO on the POD
         .* Check replication and compression
+        .* Check that pg_num equals osd_pool_default_pg_num
+        .* Check that PG autoscale mode is on
+        .* Check that the new pool has a non-blank deviceClass
 
         """
 
@@ -143,3 +155,39 @@ def test_create_delete_pool(
             raise PoolNotReplicatedAsNeeded(
                 f"Pool {self.pool_name} not replicated to size {replica}"
             )
+
+        # Check that pg_num of the new pool matches osd_pool_default_pg_num
+        ct_pod = get_ceph_tools_pod()
+        osd_pool_default_pg_num = ct_pod.exec_ceph_cmd(
+            ceph_cmd="ceph config get mon osd_pool_default_pg_num"
+        )
+        logger.info(f"The osd pool default pg num value is {osd_pool_default_pg_num}")
+        expected_pgs = {
+            self.pool_name: osd_pool_default_pg_num,
+        }
+        assert validate_num_of_pgs(
+            expected_pgs
+        ), "pg_num is not equal to the osd pool default pg num"
+        logger.info(
+            f"pg_num of the new pool {self.pool_name} "
+            f"is equal to the osd pool default pg num {osd_pool_default_pg_num}"
+        )
+
+        # Check that PG autoscale mode is on for the new pool
+        pool_autoscale_status = ct_pod.exec_ceph_cmd(
+            ceph_cmd="ceph osd pool autoscale-status"
+        )
+        for pool in pool_autoscale_status:
+            if pool["pool_name"] == self.pool_name:
+                assert pool["pg_autoscale_mode"] == "on", "PG autoscale mode is off"
+                logger.info(f"{self.pool_name} autoscale mode is on")
+
+        # Check that the deviceClass of the new pool is not blank
+        oc_obj = OCP(kind=constants.CEPHBLOCKPOOL)
+        cbp_output = oc_obj.exec_oc_cmd(
+            command=f"get cephblockpool/{self.pool_name} -n {config.ENV_DATA['cluster_namespace']} -o yaml"
+        )
+        assert cbp_output["spec"]["deviceClass"] is not None, "The deviceClass is None"
+        logger.info(
+            f"The deviceClass of the pool {self.pool_name} is {cbp_output['spec']['deviceClass']}"
+        )
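
For context on the first hunk: the test hands `validate_num_of_pgs` a single-entry mapping of pool name to expected PG count, and the helper's body (only partly visible in this diff) reduces to comparing each pool's PG count parsed out of `ceph df detail` against that expectation. A minimal sketch of that comparison under stated assumptions — the `validate_pg_counts` name, the shape of `pools_dict`, and the `"PG_NUM"` column key are hypothetical illustrations, not the module's actual internals:

```python
# Sketch of the PG-count comparison that validate_num_of_pgs performs.
# Assumption: pools_dict maps pool names to their parsed "ceph df detail"
# rows, and each row exposes the PG count under a "PG_NUM"-style key.
import logging

logger = logging.getLogger(__name__)


def validate_pg_counts(expected_pgs: dict[str, int], pools_dict: dict[str, dict]) -> bool:
    """Return True only if every pool in expected_pgs reports the expected PG count."""
    for pool_name, expected in expected_pgs.items():
        row = pools_dict.get(pool_name)
        if row is None:
            logger.error("Pool %s not found in ceph df output", pool_name)
            return False
        actual = int(row["PG_NUM"])  # hypothetical column key
        if actual != expected:
            logger.error("Pool %s has %d PGs, expected %d", pool_name, actual, expected)
            return False
    return True
```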
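Similarly, the autoscale check in the last hunk assumes `exec_ceph_cmd` returns the JSON form of `ceph osd pool autoscale-status` as a list of per-pool dicts; only the two keys the test actually reads are shown below, and `sample_status` is made-up illustrative data, not captured command output:

```python
# Hedged illustration of the autoscale-mode check, assuming the command's JSON
# output is a list of per-pool dicts with "pool_name" and "pg_autoscale_mode"
# keys (the keys the test reads). sample_status is fabricated for the example.
sample_status = [
    {"pool_name": "ocs-storagecluster-cephblockpool", "pg_autoscale_mode": "on"},
    {"pool_name": "test-pool-1", "pg_autoscale_mode": "on"},
]


def autoscale_is_on(status: list[dict], pool_name: str) -> bool:
    """Return True if the named pool appears in the status and reports mode 'on'."""
    return any(
        p["pool_name"] == pool_name and p["pg_autoscale_mode"] == "on" for p in status
    )


assert autoscale_is_on(sample_status, "test-pool-1")
```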