diff --git a/docker/mongodb-kubernetes-tests/kubetester/mongodb.py b/docker/mongodb-kubernetes-tests/kubetester/mongodb.py index c9970361a..d241fd392 100644 --- a/docker/mongodb-kubernetes-tests/kubetester/mongodb.py +++ b/docker/mongodb-kubernetes-tests/kubetester/mongodb.py @@ -250,7 +250,18 @@ def __repr__(self): def configure( self, - om: MongoDBOpsManager, + om: Optional[MongoDBOpsManager], + project_name: str, + api_client: Optional[client.ApiClient] = None, + ) -> MongoDB: + if om is not None: + return self.configure_ops_manager(om, project_name, api_client=api_client) + else: + return self.configure_cloud_qa(project_name, api_client=api_client) + + def configure_ops_manager( + self, + om: MongoDBOpsManager, project_name: str, api_client: Optional[client.ApiClient] = None, ) -> MongoDB: @@ -267,6 +278,36 @@ def configure( self["spec"]["credentials"] = om.api_key_secret(self.namespace, api_client=api_client) return self + def configure_cloud_qa( + self, + project_name: str, + src_project_config_map_name: Optional[str] = None, + api_client: Optional[client.ApiClient] = None, + ) -> MongoDB: + if "opsManager" in self["spec"]: + del self["spec"]["opsManager"] + + if src_project_config_map_name is None and "cloudManager" in self["spec"]: + src_project_config_map_name = self["spec"]["cloudManager"]["configMapRef"]["name"] + + try: + src_cm = read_configmap(self.namespace, src_project_config_map_name, api_client=api_client) + except client.ApiException as e: + if e.status == 404: + logger.debug("project config map is not specified, trying my-project as the source") + src_cm = read_configmap(self.namespace, "my-project", api_client=api_client) + else: + raise + + new_project_config_map_name = f"{self.name}-project-config" + ensure_nested_objects(self, ["spec", "cloudManager", "configMapRef"]) + self["spec"]["cloudManager"]["configMapRef"]["name"] = new_project_config_map_name + + src_cm.update({"projectName": f"{self.namespace}-{project_name}"}) + 
create_or_update_configmap(self.namespace, new_project_config_map_name, src_cm, api_client=api_client) + + return self + def configure_backup(self, mode: str = "enabled") -> MongoDB: ensure_nested_objects(self, ["spec", "backup"]) self["spec"]["backup"]["mode"] = mode diff --git a/docker/mongodb-kubernetes-tests/tests/upgrades/sharded_cluster_operator_upgrade_v1_27_to_mck.py b/docker/mongodb-kubernetes-tests/tests/upgrades/sharded_cluster_operator_upgrade_v1_27_to_mck.py index 4ed13d2a9..0b059386e 100644 --- a/docker/mongodb-kubernetes-tests/tests/upgrades/sharded_cluster_operator_upgrade_v1_27_to_mck.py +++ b/docker/mongodb-kubernetes-tests/tests/upgrades/sharded_cluster_operator_upgrade_v1_27_to_mck.py @@ -1,21 +1,16 @@ -from typing import Dict, Optional +from typing import Dict import pytest -from kubeobject import CustomObject -from kubernetes import client -from kubetester import create_or_update_configmap, read_configmap -from kubetester.certs import create_sharded_cluster_certs -from kubetester.kubetester import ensure_nested_objects +from kubetester import read_configmap, try_load +from kubetester.certs import create_mongodb_tls_certs, create_sharded_cluster_certs from kubetester.kubetester import fixture as yaml_fixture from kubetester.mongodb import MongoDB -from kubetester.mongotester import ShardedClusterTester -from kubetester.operator import Operator +from kubetester.mongotester import ReplicaSetTester, ShardedClusterTester from kubetester.phase import Phase from tests import test_logger from tests.conftest import ( LEGACY_OPERATOR_NAME, OPERATOR_NAME, - get_central_cluster_client, get_default_operator, install_legacy_deployment_state_meko, log_deployments_info, @@ -23,6 +18,7 @@ from tests.upgrades import downscale_operator_deployment MDB_RESOURCE = "sh001-base" +MDB_RS_RESOURCE = "rs" CERT_PREFIX = "prefix" logger = test_logger.get_test_logger(__name__) @@ -41,6 +37,8 @@ If the sharded cluster resource correctly reconciles after upgrade/downgrade 
and scaling steps, we assume it works correctly. """ + + # TODO CLOUDP-318100: this test should eventually be updated and not pinned to 1.27 anymore @@ -68,7 +66,7 @@ def server_certs(issuer: str, namespace: str) -> str: ) -@pytest.fixture(scope="module") +@pytest.fixture(scope="function") def sharded_cluster( issuer_ca_configmap: str, namespace: str, @@ -79,7 +77,11 @@ def sharded_cluster( yaml_fixture("sharded-cluster.yaml"), namespace=namespace, name=MDB_RESOURCE, - ) + ).configure(om=None, project_name=MDB_RESOURCE) + + if try_load(resource): + return resource + resource.set_version(custom_mdb_version) resource["spec"]["mongodsPerShardCount"] = 2 resource["spec"]["configServerCount"] = 2 @@ -87,7 +89,34 @@ def sharded_cluster( resource["spec"]["persistent"] = True resource.configure_custom_tls(issuer_ca_configmap, CERT_PREFIX) - return resource.update() + return resource + + +@pytest.fixture(scope="module") +def replica_set_certs(issuer: str, namespace: str): + return create_mongodb_tls_certs(issuer, namespace, MDB_RS_RESOURCE, f"prefix-{MDB_RS_RESOURCE}-cert") + + +@pytest.fixture(scope="module") +def replica_set( + issuer_ca_configmap: str, + namespace: str, + replica_set_certs: str, + custom_mdb_version: str, +): + resource = MongoDB.from_yaml( + yaml_fixture("replica-set-basic.yaml"), + namespace=namespace, + name=MDB_RS_RESOURCE, + ).configure(om=None, project_name=f"{MDB_RS_RESOURCE}") + + if try_load(resource): + return resource + + resource.set_version(custom_mdb_version) + resource.configure_custom_tls(issuer_ca_configmap, CERT_PREFIX) + + return resource @pytest.mark.e2e_sharded_cluster_operator_upgrade_v1_27_to_mck @@ -101,16 +130,23 @@ def test_install_legacy_deployment_state_meko( install_legacy_deployment_state_meko(namespace, managed_security_context, operator_installation_config) def test_create_sharded_cluster(self, sharded_cluster: MongoDB): + sharded_cluster.update() sharded_cluster.assert_reaches_phase(phase=Phase.Running, timeout=350) def 
test_scale_up_sharded_cluster(self, sharded_cluster: MongoDB): - sharded_cluster.load() sharded_cluster["spec"]["mongodsPerShardCount"] = 3 sharded_cluster["spec"]["configServerCount"] = 3 sharded_cluster.update() sharded_cluster.assert_reaches_phase(phase=Phase.Running, timeout=300) +@pytest.mark.e2e_sharded_cluster_operator_upgrade_v1_27_to_mck +class TestReplicaSetDeployment: + def test_create_replica_set(self, replica_set: MongoDB): + replica_set.update() + replica_set.assert_reaches_phase(phase=Phase.Running, timeout=350) + + @pytest.mark.e2e_sharded_cluster_operator_upgrade_v1_27_to_mck class TestOperatorUpgrade: @@ -137,6 +173,12 @@ def test_sharded_cluster_reconciled(self, sharded_cluster: MongoDB, namespace: s def test_assert_connectivity(self, ca_path: str): ShardedClusterTester(MDB_RESOURCE, 1, ssl=True, ca_path=ca_path).assert_connectivity() + def test_replica_set_reconciled(self, replica_set: MongoDB): + replica_set.assert_reaches_phase(phase=Phase.Running, timeout=850, ignore_errors=True) + + def test_assert_connectivity_replica_set(self, ca_path: str): + ReplicaSetTester(MDB_RS_RESOURCE, 3, ssl=True, ca_path=ca_path).assert_connectivity() + def test_scale_down_sharded_cluster(self, sharded_cluster: MongoDB, namespace: str): sharded_cluster.load() # Scale down both by 1 @@ -168,6 +210,12 @@ def test_sharded_cluster_reconciled(self, sharded_cluster: MongoDB): def test_assert_connectivity(self, ca_path: str): ShardedClusterTester(MDB_RESOURCE, 1, ssl=True, ca_path=ca_path).assert_connectivity() + def test_replica_set_reconciled(self, replica_set: MongoDB): + replica_set.assert_reaches_phase(phase=Phase.Running, timeout=850, ignore_errors=True) + + def test_assert_connectivity_replica_set(self, ca_path: str): + ReplicaSetTester(MDB_RS_RESOURCE, 3, ssl=True, ca_path=ca_path).assert_connectivity() + def test_scale_up_sharded_cluster(self, sharded_cluster: MongoDB): sharded_cluster.load() sharded_cluster["spec"]["mongodsPerShardCount"] = 3