mdr+cnv appset pull app and other fixes (#9935)
Signed-off-by: Parikshith <[email protected]>
parikshithb authored Jun 26, 2024
1 parent 4821e84 commit 19b7bca
Showing 4 changed files with 199 additions and 76 deletions.
17 changes: 13 additions & 4 deletions conf/ocsci/mdr_workload.yaml
@@ -30,15 +30,24 @@ ENV_DATA:
dr_workload_app_pvc_selector: {'appname': 'busybox_app5'}, pod_count: 2, pvc_count: 2
},
]
dr_cnv_workload_appset: [
{ name: "vm-appset-1", workload_dir: "mdr/cnv-workload/appset/vm-appset-1",
dr_workload_app_placement_name: "vm-appset-1-placement", vm_name: "vm-workload-1",
dr_cnv_workload_appset_push: [
{ name: "vm-appset-push-1", destination_namespace: "vm-appset-push-1", appset_model: "push",
workload_dir: "mdr/cnv-workload/appset/vm-appset-push-1",
dr_workload_app_placement_name: "vm-appset-push-1-placement", vm_name: "vm-workload-1",
vm_secret: "vm-secret-1", vm_username: "cirros",
dr_workload_app_pvc_selector: { 'appname': 'kubevirt' }, pod_count: 1, pvc_count: 1
},
]
dr_cnv_workload_appset_pull: [
{ name: "vm-appset-pull-1", destination_namespace: "vm-appset-pull-1", appset_model: "pull",
workload_dir: "mdr/cnv-workload/appset/vm-appset-pull-1",
dr_workload_app_placement_name: "vm-appset-pull-1-placement", vm_name: "vm-workload-1",
vm_secret: "vm-secret-1", vm_username: "cirros",
dr_workload_app_pvc_selector: { 'appname': 'kubevirt' }, pod_count: 1, pvc_count: 1
},
]
dr_cnv_workload_sub: [
{ name: "vm-sub-1", workload_dir: "mdr/cnv-workload/subscription/vm-sub-1",
{ name: "vm-sub-1", destination_namespace: "vm-sub-1", workload_dir: "mdr/cnv-workload/subscription/vm-sub-1",
dr_workload_app_placement_name: "vm-sub-1-placement", vm_name: "vm-workload-1",
vm_secret: "vm-secret-1", vm_username: "cirros",
dr_workload_app_pvc_selector: { 'appname': 'kubevirt' }, pod_count: 1, pvc_count: 1
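For orientation, a minimal sketch of how a test could read the new pull-model entries out of this config (illustrative only: ocs-ci merges this file into `config.ENV_DATA` itself; the direct yaml load here just shows the data shape):

```python
# Illustrative sketch, not the ocs-ci loading path.
import yaml

with open("conf/ocsci/mdr_workload.yaml") as f:
    env_data = yaml.safe_load(f)["ENV_DATA"]

for entry in env_data.get("dr_cnv_workload_appset_pull", []):
    # Each entry carries what CnvWorkload needs: name, destination namespace,
    # appset model, placement name, VM credentials, and pod/PVC counts.
    print(entry["name"], entry["destination_namespace"], entry["appset_model"])
```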
138 changes: 120 additions & 18 deletions ocs_ci/ocs/dr/dr_workload.py
@@ -13,7 +13,7 @@

from ocs_ci.framework import config
from ocs_ci.helpers import dr_helpers
from ocs_ci.helpers.cnv_helpers import create_vm_secret
from ocs_ci.helpers.cnv_helpers import create_vm_secret, cal_md5sum_vm
from ocs_ci.helpers.helpers import (
delete_volume_in_backend,
verify_volume_deleted_in_backend,
@@ -582,32 +582,18 @@ def __init__(self, **kwargs):
self.drpc_yaml_file = os.path.join(constants.DRPC_PATH)
self.cnv_workload_placement_name = kwargs.get("workload_placement_name")
self.cnv_workload_pvc_selector = kwargs.get("workload_pvc_selector")
self.appset_model = kwargs.get("appset_model", None)

def deploy_workload(self):
"""
Deployment specific to cnv workloads
"""
self._deploy_prereqs()
self.workload_namespace = self._get_workload_namespace()
self.vm_obj = VirtualMachine(
vm_name=self.vm_name, namespace=self.workload_namespace
)

# Creating secrets to access the VMs via SSH
for cluster in get_non_acm_cluster_config():
config.switch_ctx(cluster.MULTICLUSTER["multicluster_index"])
try:
create_project(project_name=self.workload_namespace)
except CommandFailed as ex:
if str(ex).find("(AlreadyExists)"):
log.warning("The namespace already exists !")

self.vm_secret_obj.append(
create_vm_secret(
secret_name=self.vm_secret_name, namespace=self.workload_namespace
)
)
self.manage_dr_vm_secrets()

# Load DRPC
drpc_yaml_data = templating.load_yaml(self.drpc_yaml_file)
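The elided lines then patch the loaded DRPC data per workload. A hedged sketch of that load-and-patch pattern (the name and fields below are illustrative examples, not the exact fields the elided hunk sets):

```python
# Sketch of the DRPC templating pattern used above; illustrative values only.
from ocs_ci.ocs import constants
from ocs_ci.utility import templating

drpc_yaml_data = templating.load_yaml(constants.DRPC_PATH)
drpc_yaml_data["metadata"]["name"] = "vm-appset-pull-1-placement-drpc"  # example name
# pvcSelector is Ramen's label selector for the protected PVCs; setdefault
# guards against a template that does not pre-populate the key.
drpc_yaml_data["spec"].setdefault("pvcSelector", {})["matchLabels"] = {"appname": "kubevirt"}
```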
@@ -635,12 +621,59 @@ def deploy_workload(self):
)
log.info(cnv_workload_yaml_data_load)
for cnv_workload_yaml_data in cnv_workload_yaml_data_load:
# Update Channel for sub apps
if self.workload_type == constants.SUBSCRIPTION:
# Update channel for Subscription apps
if cnv_workload_yaml_data["kind"] == "Channel":
cnv_workload_yaml_data["spec"]["pathname"] = self.workload_repo_url
elif cnv_workload_yaml_data["kind"] == "ApplicationSet":
cnv_workload_yaml_data["metadata"]["name"] = self.workload_name
# Change the destination namespace for AppSet workload
cnv_workload_yaml_data["spec"]["template"]["spec"]["destination"][
"namespace"
] = self.workload_namespace

# Change the AppSet placement label
for generator in cnv_workload_yaml_data["spec"]["generators"]:
if (
"clusterDecisionResource" in generator
and "labelSelector" in generator["clusterDecisionResource"]
):
labels = generator["clusterDecisionResource"][
"labelSelector"
].get("matchLabels", {})
if "cluster.open-cluster-management.io/placement" in labels:
labels[
"cluster.open-cluster-management.io/placement"
] = self.cnv_workload_placement_name

if self.appset_model == "pull":
# load appset_yaml_file, add "annotations" key and add values to it
cnv_workload_yaml_data["spec"]["template"]["metadata"].setdefault(
"annotations", {}
)
cnv_workload_yaml_data["spec"]["template"]["metadata"][
"annotations"
][
"apps.open-cluster-management.io/ocm-managed-cluster"
] = "{{name}}"
cnv_workload_yaml_data["spec"]["template"]["metadata"][
"annotations"
]["argocd.argoproj.io/skip-reconcile"] = "true"

# Assign values to the "labels" key
cnv_workload_yaml_data["spec"]["template"]["metadata"]["labels"][
"apps.open-cluster-management.io/pull-to-ocm-managed-cluster"
] = "true"

if cnv_workload_yaml_data["kind"] == constants.PLACEMENT:
cnv_workload_yaml_data["metadata"][
"name"
] = self.cnv_workload_placement_name
cnv_workload_yaml_data["metadata"]["namespace"] = (
self.workload_namespace
if self.workload_type == constants.SUBSCRIPTION
else constants.GITOPS_CLUSTER_NAMESPACE
)
# Update preferred cluster name
cnv_workload_yaml_data["spec"]["predicates"][0][
"requiredClusterSelector"
@@ -786,6 +819,75 @@ def delete_workload(self, force=False):
err_msg = f"Failed to delete the workload: {ex}"
raise ResourceNotDeleted(err_msg)

def manage_dr_vm_secrets(self):
"""
Create secrets to access the VMs via SSH. If a secret already exists, delete and recreate it.
"""
for cluster in get_non_acm_cluster_config():
config.switch_ctx(cluster.MULTICLUSTER["multicluster_index"])

# Create namespace if it doesn't exist
try:
create_project(project_name=self.workload_namespace)
except CommandFailed as ex:
if "(AlreadyExists)" in str(ex):
log.warning("The namespace already exists!")

# Create or recreate the secret for ssh access
try:
log.info(
f"Creating secret namespace {self.workload_namespace} for ssh access"
)
self.vm_secret_obj.append(
create_vm_secret(
secret_name=self.vm_secret_name,
namespace=self.workload_namespace,
)
)
except CommandFailed as ex:
if "(AlreadyExists)" in str(ex):
log.warning(
f"Secret {self.vm_secret_name} already exists in namespace {self.workload_namespace}, "
f"deleting and recreating the secret to fetch the right SSH pub key."
)
ocp.OCP(
kind=constants.SECRET,
namespace=self.workload_namespace,
).delete(resource_name=self.vm_secret_name, wait=True)
self.vm_secret_obj.append(
create_vm_secret(
secret_name=self.vm_secret_name,
namespace=self.workload_namespace,
)
)


def validate_data_integrity_vm(
cnv_workloads, file_name, md5sum_original, app_state="FailOver"
):
"""
Validates the MD5 checksum of files on VMs after FailOver/Relocate.

Args:
cnv_workloads (list): List of workloads, each containing vm_obj, vm_username, and workload_name.
file_name (str): Name/path of the file whose md5sum is validated.
md5sum_original (list): List of original MD5 checksums of the file, in workload order.
app_state (str): DR state of the app (FailOver/Relocate), used in the validation log message.
"""
for count, cnv_wl in enumerate(cnv_workloads):
md5sum_new = cal_md5sum_vm(
cnv_wl.vm_obj, file_path=file_name, username=cnv_wl.vm_username
)
log.info(
f"Comparing original checksum: {md5sum_original[count]} of {file_name} with {md5sum_new}"
f" on {cnv_wl.workload_name}after {app_state}"
)
assert (
md5sum_original[count] == md5sum_new
), f"Failed: MD5 comparison after {app_state}"


def validate_data_integrity(namespace, path="/mnt/test/hashfile", timeout=600):
"""
32 changes: 22 additions & 10 deletions tests/conftest.py
@@ -6647,11 +6647,14 @@ def cnv_dr_workload(request):
"""
instances = []

def factory(num_of_vm_subscription=1, num_of_vm_appset=0):
def factory(
num_of_vm_subscription=1, num_of_vm_appset_push=0, num_of_vm_appset_pull=0
):
"""
Args:
num_of_vm_subscription (int): Number of Subscription type workloads to be created
num_of_vm_appset (int): Number of ApplicationSet type workloads to be created
num_of_vm_appset_push (int): Number of ApplicationSet Push type workloads to be created
num_of_vm_appset_pull (int): Number of ApplicationSet Pull type workloads to be created
Raises:
ResourceNotDeleted: In case workload resources are not deleted properly
@@ -6662,16 +6665,21 @@ def factory(num_of_vm_subscription=1, num_of_vm_appset=0):
"""
total_pvc_count = 0
workload_types = [
(constants.SUBSCRIPTION, "dr_cnv_workload_sub"),
(constants.APPLICATION_SET, "dr_cnv_workload_appset"),
(constants.SUBSCRIPTION, "dr_cnv_workload_sub", num_of_vm_subscription),
(
constants.APPLICATION_SET,
"dr_cnv_workload_appset_push",
num_of_vm_appset_push,
),
(
constants.APPLICATION_SET,
"dr_cnv_workload_appset_pull",
num_of_vm_appset_pull,
),
]

for workload_type, data_key in workload_types:
for index in range(
num_of_vm_subscription
if workload_type == constants.SUBSCRIPTION
else num_of_vm_appset
):
for workload_type, data_key, num_of_vm in workload_types:
for index in range(num_of_vm):
workload_details = ocsci_config.ENV_DATA[data_key][index]
workload = CnvWorkload(
workload_type=workload_type,
@@ -6680,6 +6688,7 @@ def factory(num_of_vm_subscription=1, num_of_vm_appset=0):
vm_secret=workload_details["vm_secret"],
vm_username=workload_details["vm_username"],
workload_name=workload_details["name"],
workload_namespace=workload_details["destination_namespace"],
workload_pod_count=workload_details["pod_count"],
workload_pvc_count=workload_details["pvc_count"],
workload_placement_name=workload_details[
Expand All @@ -6688,6 +6697,9 @@ def factory(num_of_vm_subscription=1, num_of_vm_appset=0):
workload_pvc_selector=workload_details[
"dr_workload_app_pvc_selector"
],
appset_model=workload_details["appset_model"]
if workload_type == constants.APPLICATION_SET
else None,
)
instances.append(workload)
total_pvc_count += workload_details["pvc_count"]
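From a test, the updated factory would be requested roughly like this (a sketch assuming, as the elided lines suggest, that the factory returns the created CnvWorkload instances):

```python
def test_cnv_dr_sketch(cnv_dr_workload):
    # One workload of each type; counts map to the ENV_DATA lists above.
    cnv_workloads = cnv_dr_workload(
        num_of_vm_subscription=1,
        num_of_vm_appset_push=1,
        num_of_vm_appset_pull=1,
    )
    for wl in cnv_workloads:
        # Attribute names assumed from the CnvWorkload kwargs above;
        # appset_model is "push"/"pull" for AppSet apps and None for Subscription.
        print(wl.workload_name, wl.appset_model)
```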