Test overriding the noobaa-default backingstore + add support for RGW backingstore creation #8193

Merged
153 changes: 138 additions & 15 deletions ocs_ci/ocs/bucket_utils.py
@@ -15,7 +15,7 @@
from ocs_ci.ocs import constants
from ocs_ci.ocs.exceptions import TimeoutExpiredError, UnexpectedBehaviour
from ocs_ci.ocs.ocp import OCP
-from ocs_ci.utility import templating, version
+from ocs_ci.utility import templating
from ocs_ci.utility.ssl_certs import get_root_ca_cert
from ocs_ci.utility.utils import TimeoutSampler, run_cmd
from ocs_ci.helpers.helpers import create_resource
@@ -724,12 +724,54 @@ def cli_create_ibmcos_backingstore(
)


-def oc_create_s3comp_backingstore(cld_mgr, backingstore_name, uls_name, region):
-    pass
+def oc_create_rgw_backingstore(cld_mgr, backingstore_name, uls_name, region):
"""
Create a new backingstore with RGW underlying storage using oc create command

Args:
cld_mgr (CloudManager): holds secret for backingstore creation
backingstore_name (str): backingstore name
uls_name (str): underlying storage name
region (str): the region in which to create the backingstore (should match the region of the underlying storage)

"""
bs_data = templating.load_yaml(constants.MCG_BACKINGSTORE_YAML)
bs_data["metadata"]["name"] = backingstore_name
bs_data["metadata"]["namespace"] = config.ENV_DATA["cluster_namespace"]
bs_data["spec"] = {
"type": "s3-compatible",
"s3Compatible": {
"targetBucket": uls_name,
"endpoint": cld_mgr.rgw_client.endpoint,
"signatureVersion": "v2",
"secret": {
"name": cld_mgr.rgw_client.secret.name,
"namespace": bs_data["metadata"]["namespace"],
},
},
}
create_resource(**bs_data)
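
A minimal usage sketch for the new helper (not part of the diff; names are hypothetical): it assumes a CloudManager whose rgw_client has already been initialized and an existing RGW bucket serving as the underlying storage. Note that region is accepted for interface parity with the other creators but is not referenced in the generated spec.

from ocs_ci.ocs.bucket_utils import oc_create_rgw_backingstore

oc_create_rgw_backingstore(
    cld_mgr=cld_mgr,  # CloudManager with an initialized rgw_client
    backingstore_name="rgw-backingstore",  # hypothetical name
    uls_name="my-rgw-uls",  # pre-created RGW bucket (underlying storage)
    region=None,  # unused by the oc variant
)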


-def cli_create_s3comp_backingstore(cld_mgr, backingstore_name, uls_name, region):
-    pass
+def cli_create_rgw_backingstore(mcg_obj, cld_mgr, backingstore_name, uls_name, region):
"""
Create a new backingstore with RGW underlying storage using a NooBaa CLI command

Args:
mcg_obj (MCG): MCG object used to execute the NooBaa CLI command
cld_mgr (CloudManager): holds secret for backingstore creation
backingstore_name (str): backingstore name
uls_name (str): underlying storage name
region (str): the region in which to create the backingstore (should match the region of the underlying storage)

"""
mcg_obj.exec_mcg_cmd(
f"backingstore create s3-compatible {backingstore_name} "
f"--endpoint {cld_mgr.rgw_client.endpoint} "
f"--access-key {cld_mgr.rgw_client.access_key} "
f"--secret-key {cld_mgr.rgw_client.secret_key} "
f"--target-bucket {uls_name}",
use_yes=True,
)
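
A matching usage sketch for the CLI variant (hypothetical names; mcg_obj and cld_mgr come from the usual fixtures). As with the oc variant, region is part of the shared signature but does not appear in the generated command.

from ocs_ci.ocs.bucket_utils import cli_create_rgw_backingstore

cli_create_rgw_backingstore(
    mcg_obj,  # MCG object that wraps the NooBaa CLI
    cld_mgr,  # CloudManager with an initialized rgw_client
    backingstore_name="rgw-backingstore",
    uls_name="my-rgw-uls",
    region=None,  # not referenced in the CLI command
)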


def oc_create_pv_backingstore(backingstore_name, vol_num, size, storage_class):
@@ -1757,16 +1799,9 @@ def patch_replication_policy_to_bucket(bucket_name, rule_id, destination_bucket_
rule_id (str): The ID of the replication rule
destination_bucket_name (str): The name of the replication destination bucket
"""
-    if version.get_semantic_ocs_version_from_config() >= version.VERSION_4_12:
-        replication_policy = {
-            "rules": [
-                {"rule_id": rule_id, "destination_bucket": destination_bucket_name}
-            ]
-        }
-    else:
-        replication_policy = [
-            {"rule_id": rule_id, "destination_bucket": destination_bucket_name}
-        ]
+    replication_policy = {
+        "rules": [{"rule_id": rule_id, "destination_bucket": destination_bucket_name}]
+    }
replication_policy_patch_dict = {
"spec": {
"additionalConfig": {"replicationPolicy": json.dumps(replication_policy)}
@@ -1802,6 +1837,31 @@ def update_replication_policy(bucket_name, replication_policy_dict):
).patch(params=json.dumps(replication_policy_patch_dict), format_type="merge")


def patch_replication_policy_to_bucketclass(
bucketclass_name, rule_id, destination_bucket_name
):
"""
Patches a replication policy to a bucketclass

Args:
bucketclass_name (str): The name of the bucketclass to patch
rule_id (str): The ID of the replication rule
destination_bucket_name (str): The name of the replication destination bucket
"""

replication_policy = {
"rules": [{"rule_id": rule_id, "destination_bucket": destination_bucket_name}]
}
replication_policy_patch_dict = {
"spec": {"replicationPolicy": json.dumps(replication_policy)}
}
OCP(
kind="bucketclass",
namespace=config.ENV_DATA["cluster_namespace"],
resource_name=bucketclass_name,
).patch(params=json.dumps(replication_policy_patch_dict), format_type="merge")
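
A usage sketch for the new bucketclass patch helper (hypothetical names). With these arguments the policy serializes to the dict-based format, '{"rules": [{"rule_id": "rule-1", "destination_bucket": "target-bucket"}]}', which is merged into the bucketclass spec.

from ocs_ci.ocs.bucket_utils import patch_replication_policy_to_bucketclass

patch_replication_policy_to_bucketclass(
    bucketclass_name="my-bucketclass",  # hypothetical bucketclass
    rule_id="rule-1",
    destination_bucket_name="target-bucket",
)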


def random_object_round_trip_verification(
io_pod,
bucket_name,
@@ -2029,3 +2089,66 @@ def sample_if_objects_expired(mcg_obj, bucket_name, prefix="", timeout=600, slee

assert sampler.wait_for_func_status(result=True), f"{message} are not expired"
logger.info(f"{message} are expired")


def delete_all_noobaa_buckets(mcg_obj, request):
"""
Deletes all the buckets in NooBaa and restores first.bucket after the current test

Args:
mcg_obj (MCG): MCG object
request (FixtureRequest): pytest request object, used to register the finalizer
"""

logger.info("Listing all buckets in the cluster")
buckets = mcg_obj.s3_client.list_buckets()

logger.info("Deleting all buckets and its objects")
for bucket in buckets["Buckets"]:
logger.info(f"Deleting {bucket} and its objects")
s3_bucket = mcg_obj.s3_resource.Bucket(bucket["Name"])
s3_bucket.objects.all().delete()
s3_bucket.delete()

def finalizer():
if "first.bucket" not in mcg_obj.s3_client.list_buckets()["Buckets"]:
logger.info("Creating the default bucket: first.bucket")
mcg_obj.s3_client.create_bucket(Bucket="first.bucket")
else:
logger.info("Skipping creation of first.bucket as it already exists")

request.addfinalizer(finalizer)
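
A sketch of how a test might use this helper (assuming the standard mcg_obj fixture and pytest's built-in request): the call wipes every bucket up front, and the registered finalizer recreates first.bucket once the test finishes.

def test_with_clean_bucket_state(mcg_obj, request):
    delete_all_noobaa_buckets(mcg_obj, request)
    # ... the test body now runs against an empty set of buckets ...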


def get_nb_bucket_stores(mcg_obj, bucket_name):
"""
Query the noobaa-db for the backingstores/namespacestores
that a given bucket is using for its data placement

Args:
mcg_obj (MCG): MCG object
bucket_name (str): name of the bucket

Returns:
list: list of backingstores/namespacestores names

"""
stores = set()
bucket_data = bucket_read_api(mcg_obj, bucket_name)

# Namespacestore bucket
if "namespace" in bucket_data:
read_srcs_list = [
d["resource"] for d in bucket_data["namespace"]["read_resources"]
]
write_src = bucket_data["namespace"]["write_resource"]["resource"]
stores.update(read_srcs_list + [write_src])

# Data bucket
else:
tiers = [d["tier"] for d in bucket_data["tiering"]["tiers"]]
for tier in tiers:
tier_data = mcg_obj.send_rpc_query("tier_api", "read_tier", {"name": tier})
stores.update(tier_data["reply"]["attached_pools"])

return list(stores)
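
A sketch of the intended use (hypothetical names): after moving a bucket's data placement, a test can assert that the new store shows up in the bucket's resolved backingstores/namespacestores.

stores = get_nb_bucket_stores(mcg_obj, "my-bucket")
assert "my-new-backingstore" in stores, f"Unexpected stores: {stores}"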
14 changes: 14 additions & 0 deletions ocs_ci/ocs/constants.py
@@ -295,6 +295,8 @@
DEFAULT_STORAGECLASS_RBD_THICK = f"{DEFAULT_CLUSTERNAME}-ceph-rbd-thick"
DEFAULT_OCS_STORAGECLASS = "default-ocs-storage-class"

THIN_CSI_STORAGECLASS = "thin-csi"

# Independent mode default StorageClasses
DEFAULT_EXTERNAL_MODE_STORAGECLASS_RGW = f"{DEFAULT_CLUSTERNAME_EXTERNAL_MODE}-ceph-rgw"

@@ -1886,6 +1888,18 @@
BACKINGSTORE_TYPE_AZURE = "azure-blob"
BACKINGSTORE_TYPE_S3_COMP = "s3-compatible"
BACKINGSTORE_TYPE_GOOGLE = "google-cloud-storage"
BACKINGSTORE_TYPE_PV_POOL = "pv-pool"
BACKINGSTORE_TYPE_IBMCOS = "ibm-cos"

BS_TYPE_TO_PLATFORM_NAME_MAPPING = {
BACKINGSTORE_TYPE_AWS: "aws",
BACKINGSTORE_TYPE_AZURE: "azure",
BACKINGSTORE_TYPE_GOOGLE: "gcp",
BACKINGSTORE_TYPE_PV_POOL: "pv",
BACKINGSTORE_TYPE_S3_COMP: "rgw",
BACKINGSTORE_TYPE_IBMCOS: "ibmcos",
}
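
A sketch of the lookup this mapping presumably enables (the platform values match the keys registered in backingstore_factory further down in this diff): translating a backingstore spec type into the factory's platform name.

from ocs_ci.ocs import constants

platform = constants.BS_TYPE_TO_PLATFORM_NAME_MAPPING[constants.BACKINGSTORE_TYPE_S3_COMP]
assert platform == "rgw"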


# Squads assignment
# Tests are assigned to Squads based on patterns matching test path.
4 changes: 4 additions & 0 deletions ocs_ci/ocs/resources/backingstore.py
@@ -8,11 +8,13 @@
oc_create_azure_backingstore,
oc_create_pv_backingstore,
oc_create_ibmcos_backingstore,
oc_create_rgw_backingstore,
cli_create_google_backingstore,
cli_create_azure_backingstore,
cli_create_pv_backingstore,
cli_create_ibmcos_backingstore,
cli_create_aws_backingstore,
cli_create_rgw_backingstore,
)
from ocs_ci.ocs.exceptions import (
TimeoutExpiredError,
@@ -265,13 +267,15 @@ def backingstore_factory(request, cld_mgr, mcg_obj, cloud_uls_factory):
"gcp": oc_create_google_backingstore,
"azure": oc_create_azure_backingstore,
"ibmcos": oc_create_ibmcos_backingstore,
"rgw": oc_create_rgw_backingstore,
"pv": oc_create_pv_backingstore,
},
"cli": {
"aws": cli_create_aws_backingstore,
"gcp": cli_create_google_backingstore,
"azure": cli_create_azure_backingstore,
"ibmcos": cli_create_ibmcos_backingstore,
"rgw": cli_create_rgw_backingstore,
"pv": cli_create_pv_backingstore,
},
}
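
With "rgw" now registered in both method maps, a test can request an RGW backingstore through the factory fixture; a sketch, with the uls_dict shape assumed from the existing platforms:

def test_rgw_backingstore_creation(backingstore_factory):
    # (amount, region) tuples per platform; region is unused for RGW
    rgw_backingstores = backingstore_factory("oc", {"rgw": [(1, None)]})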
39 changes: 39 additions & 0 deletions ocs_ci/ocs/resources/mcg.py
@@ -1108,3 +1108,42 @@ def reset_admin_pw(self, new_password):

logger.info("Waiting a bit for the change to propogate through the system...")
sleep(15)

def get_admin_default_resource_name(self):
"""
Get the default resource name of the admin account

Returns:
str: The default resource name

"""

read_account_output = self.send_rpc_query(
"account_api",
"read_account",
params={
"email": self.noobaa_user,
},
)
return read_account_output.json()["reply"]["default_resource"]

def get_default_bc_backingstore_name(self):
"""
Get the default backingstore name of the default bucketclass

Returns:
str: The default backingstore name

"""
bucketclass_ocp_obj = OCP(
kind=constants.BUCKETCLASS,
namespace=config.ENV_DATA["cluster_namespace"],
resource_name=constants.DEFAULT_NOOBAA_BUCKETCLASS,
)
return (
bucketclass_ocp_obj.get()
.get("spec")
.get("placementPolicy")
.get("tiers")[0]
.get("backingStores")[0]
)
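
A sketch tying the two new getters together (hypothetical backingstore name): after overriding the default backingstore, a test can verify both the admin account's default resource and the default bucketclass's backing store.

assert mcg_obj.get_admin_default_resource_name() == "my-new-backingstore"
assert mcg_obj.get_default_bc_backingstore_name() == "my-new-backingstore"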