From 187c4917f49e2293e01b0d2b329197193efb9f06 Mon Sep 17 00:00:00 2001 From: Mia Altieri <32723809+MiaAltieri@users.noreply.github.com> Date: Thu, 21 Mar 2024 15:58:33 +0100 Subject: [PATCH] [DPE-3839] restoring backups with name mismatch (#381) ## Issue Migrating a cluster to a cluster with a new name is not supported ## Solution support that via passing an option for it --- actions.yaml | 8 +- lib/charms/mongodb/v1/mongodb_backups.py | 120 +++++++++++------- .../sharding_tests/test_sharding_backups.py | 88 ++++++++----- .../sharding_tests/writes_helpers.py | 4 +- tests/unit/test_mongodb_backups.py | 13 +- 5 files changed, 151 insertions(+), 82 deletions(-) diff --git a/actions.yaml b/actions.yaml index fdfc8b2f9..8fe4f2743 100644 --- a/actions.yaml +++ b/actions.yaml @@ -5,7 +5,8 @@ get-primary: description: Report primary replica get-password: - description: Fetch the password of the provided internal user of the charm, used for internal charm operations. + description: + Fetch the password of the provided internal user of the charm, used for internal charm operations. It is for internal charm users only, and SHOULD NOT be used by applications. params: username: @@ -39,6 +40,11 @@ restore: backup-id: type: string description: A backup-id to identify the backup to restore. Format of <%Y-%m-%dT%H:%M:%SZ> + remap-pattern: + type: string + description: + Optional, a pattern used to remap cluster component names when performing a restore. + Format of old_config_server_name=new_config_server_name,old_shard_name=new_shard_name set-tls-private-key: description: Set the privates key, which will be used for certificate signing requests (CSR). Run for each unit separately. diff --git a/lib/charms/mongodb/v1/mongodb_backups.py b/lib/charms/mongodb/v1/mongodb_backups.py index 53d46b81c..ee287df34 100644 --- a/lib/charms/mongodb/v1/mongodb_backups.py +++ b/lib/charms/mongodb/v1/mongodb_backups.py @@ -40,7 +40,7 @@ # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 4 +LIBPATCH = 5 logger = logging.getLogger(__name__) @@ -54,7 +54,7 @@ "storage-class": "storage.s3.storageClass", } S3_RELATION = "s3-credentials" -REMAPPING_PATTERN = r"\ABackup doesn't match current cluster topology - it has different replica set names. Extra shards in the backup will cause this, for a simple example. The extra/unknown replica set names found in the backup are: ([^,\s]+)([.] Backup has no data for the config server or sole replicaset)?\Z" +REMAPPING_PATTERN = r"\ABackup doesn't match current cluster topology - it has different replica set names. Extra shards in the backup will cause this, for a simple example. The extra/unknown replica set names found in the backup are: ([\w\d\-,\s]+)([.] Backup has no data for the config server or sole replicaset)?\Z" PBM_STATUS_CMD = ["status", "-o", "json"] MONGODB_SNAP_DATA_DIR = "/var/snap/charmed-mongodb/current" BACKUP_RESTORE_MAX_ATTEMPTS = 10 @@ -141,13 +141,13 @@ def _on_s3_credential_changed(self, event: CredentialsChangedEvent): return if not self.charm.db_initialised: - self._defer_action_with_info_log( + self._defer_event_with_info_log( event, action, "Set PBM credentials, MongoDB not ready." ) return if not self.charm.has_backup_service(): - self._defer_action_with_info_log( + self._defer_event_with_info_log( event, action, "Set PBM configurations, pbm-agent service not found." 
) return @@ -181,7 +181,7 @@ def _on_create_backup_action(self, event) -> None: return if isinstance(pbm_status, WaitingStatus): - self._defer_action_with_info_log( + self._fail_action_with_error_log( event, action, "Sync-ing configurations needs more time, must wait before creating a backup.", @@ -215,7 +215,7 @@ def _on_list_backups_action(self, event) -> None: self.charm.unit.status = pbm_status if isinstance(pbm_status, WaitingStatus): - self._defer_action_with_info_log( + self._fail_action_with_error_log( event, action, "Sync-ing configurations needs more time, must wait before listing backups.", @@ -238,10 +238,32 @@ def _on_restore_action(self, event) -> None: if not self._pass_sanity_checks(event, action): return + if not self._restore_hook_checks(event): + return + + # sometimes when we are trying to restore pmb can be resyncing, so we need to retry + try: + backup_id = event.params.get("backup-id") + self._restore(backup_id, remapping_args=event.params.get("remap-pattern")) + self.charm.unit.status = MaintenanceStatus( + f"restore started/running, backup id:'{backup_id}'" + ) + self._success_action_with_info_log( + event, action, {"restore-status": "restore started"} + ) + except ResyncError: + raise + except RestoreError as restore_error: + self._fail_action_with_error_log(event, action, str(restore_error)) + + # BEGIN: helper functions + def _restore_hook_checks(self, event) -> bool: + """Runs pre-hook checks specific to running the restore command.""" + action = "restore" backup_id = event.params.get("backup-id") if not backup_id: self._fail_action_with_error_log(event, action, "Missing backup-id to restore") - return + return False # only leader can restore backups. This prevents multiple restores from being attempted at # once. @@ -249,7 +271,7 @@ def _on_restore_action(self, event) -> None: self._fail_action_with_error_log( event, action, "The action can be run only on leader unit." ) - return + return False # cannot restore backup if pbm is not ready. This could be due to: resyncing, incompatible, # options, incorrect credentials, creating a backup, or already performing a restore. @@ -259,37 +281,33 @@ def _on_restore_action(self, event) -> None: self._fail_action_with_error_log( event, action, "Please wait for current backup/restore to finish." ) - return + return False if isinstance(pbm_status, WaitingStatus): - self._defer_action_with_info_log( + self._fail_action_with_error_log( event, action, "Sync-ing configurations needs more time, must wait before restoring.", ) - return + return False if isinstance(pbm_status, BlockedStatus): self._fail_action_with_error_log( event, action, f"Cannot restore backup {pbm_status.message}." ) - return + return False - # sometimes when we are trying to restore pmb can be resyncing, so we need to retry - try: - self._try_to_restore(backup_id) - self.charm.unit.status = MaintenanceStatus( - f"restore started/running, backup id:'{backup_id}'" - ) - self._success_action_with_info_log( - event, action, {"restore-status": "restore started"} + if ( + self._needs_provided_remap_arguments(backup_id) + and event.params.get("remap-pattern") is None + ): + self._fail_action_with_error_log( + event, action, "Cannot restore backup, 'remap-pattern' must be set." 
) - except ResyncError: - raise - except RestoreError as restore_error: - self._fail_action_with_error_log(event, action, str(restore_error)) + return False + + return True - # BEGIN: helper functions def is_valid_s3_integration(self) -> bool: """Return true if relation to s3-integrator is valid. @@ -337,13 +355,13 @@ def _configure_pbm_options(self, event) -> None: return except ResyncError: self.charm.unit.status = WaitingStatus("waiting to sync s3 configurations.") - self._defer_action_with_info_log( + self._defer_event_with_info_log( event, action, "Sync-ing configurations needs more time." ) return except PBMBusyError: self.charm.unit.status = WaitingStatus("waiting to sync s3 configurations.") - self._defer_action_with_info_log( + self._defer_event_with_info_log( event, action, "Cannot update configs while PBM is running, must wait for PBM action to finish.", @@ -496,7 +514,7 @@ def _generate_backup_list_output(self) -> str: if backup["status"] == "error": # backups from a different cluster have an error status, but they should show as # finished - if self._backup_from_different_cluster(backup.get("error", "")): + if self._is_backup_from_different_cluster(backup.get("error", "")): backup_status = "finished" else: # display reason for failure if available @@ -532,11 +550,11 @@ def _format_backup_list(self, backup_list: List[str]) -> str: return "\n".join(backups) - def _backup_from_different_cluster(self, backup_status: str) -> bool: + def _is_backup_from_different_cluster(self, backup_status: str) -> bool: """Returns if a given backup was made on a different cluster.""" return re.search(REMAPPING_PATTERN, backup_status) is not None - def _try_to_restore(self, backup_id: str) -> None: + def _restore(self, backup_id: str, remapping_args: Optional[str] = None) -> None: """Try to restore cluster a backup specified by backup id. If PBM is resyncing, the function will retry to create backup @@ -553,7 +571,10 @@ def _try_to_restore(self, backup_id: str) -> None: ): with attempt: try: - remapping_args = self._remap_replicaset(backup_id) + remapping_args = remapping_args or self._remap_replicaset(backup_id) + if remapping_args: + remapping_args = f"--replset-remapping {remapping_args}" + restore_cmd = ["restore", backup_id] if remapping_args: restore_cmd = restore_cmd + remapping_args.split(" ") @@ -619,34 +640,27 @@ def _remap_replicaset(self, backup_id: str) -> str: pbm_status = json.loads(pbm_status) # grab the error status from the backup if present - backups = pbm_status["backups"]["snapshot"] or [] - backup_status = "" - for backup in backups: - if not backup_id == backup["name"]: - continue + backup_error_status = self.get_backup_error_status(backup_id) - backup_status = backup.get("error", "") - break - - if not self._backup_from_different_cluster(backup_status): + if not self._is_backup_from_different_cluster(backup_error_status): return "" # TODO in the future when we support conf servers and shards this will need to be more # comprehensive. 
- old_cluster_name = re.search(REMAPPING_PATTERN, backup_status).group(1) + old_cluster_name = re.search(REMAPPING_PATTERN, backup_error_status).group(1) current_cluster_name = self.charm.app.name logger.debug( "Replica set remapping is necessary for restore, old cluster name: %s ; new cluster name: %s", old_cluster_name, current_cluster_name, ) - return f"--replset-remapping {current_cluster_name}={old_cluster_name}" + return f"{current_cluster_name}={old_cluster_name}" def _fail_action_with_error_log(self, event, action: str, message: str) -> None: logger.error("%s failed: %s", action.capitalize(), message) event.fail(message) - def _defer_action_with_info_log(self, event, action: str, message: str) -> None: + def _defer_event_with_info_log(self, event, action: str, message: str) -> None: logger.info("Deferring %s: %s", action, message) event.defer() @@ -733,3 +747,23 @@ def process_pbm_error(self, pbm_status: Optional[_StrOrBytes]) -> str: elif "status code: 301" in error_message: message = "s3 configurations are incompatible." return message + + def _needs_provided_remap_arguments(self, backup_id: str) -> bool: + """Returns true if remap arguments are needed to perform a restore command.""" + backup_error_status = self.get_backup_error_status(backup_id) + + # When a charm is running as a Replica set it can generate its own remapping arguments + return self._is_backup_from_different_cluster(backup_error_status) and self.charm.is_role( + Config.Role.CONFIG_SERVER + ) + + def get_backup_error_status(self, backup_id: str) -> str: + """Get the error status for a provided backup.""" + pbm_status = self.charm.run_pbm_command(["status", "--out=json"]) + pbm_status = json.loads(pbm_status) + backups = pbm_status["backups"].get("snapshot", []) + for backup in backups: + if backup_id == backup["name"]: + return backup.get("error", "") + + return "" diff --git a/tests/integration/sharding_tests/test_sharding_backups.py b/tests/integration/sharding_tests/test_sharding_backups.py index cf70b06b8..63a85aee8 100644 --- a/tests/integration/sharding_tests/test_sharding_backups.py +++ b/tests/integration/sharding_tests/test_sharding_backups.py @@ -18,10 +18,14 @@ S3_APP_NAME = "s3-integrator" SHARD_ONE_APP_NAME = "shard-one" +SHARD_ONE_APP_NAME_NEW = "shard-one-new" SHARD_TWO_APP_NAME = "shard-two" +SHARD_TWO_APP_NAME_NEW = "shard-two-new" CONFIG_SERVER_APP_NAME = "config-server" +CONFIG_SERVER_APP_NAME_NEW = "config-server-new" SHARD_APPS = [SHARD_ONE_APP_NAME, SHARD_TWO_APP_NAME] CLUSTER_APPS = [SHARD_ONE_APP_NAME, SHARD_TWO_APP_NAME, CONFIG_SERVER_APP_NAME] +CLUSTER_APPS_NEW = [SHARD_ONE_APP_NAME_NEW, SHARD_TWO_APP_NAME_NEW, CONFIG_SERVER_APP_NAME_NEW] SHARD_REL_NAME = "sharding" CONFIG_SERVER_REL_NAME = "config-server" S3_REL_NAME = "s3-credentials" @@ -319,52 +323,66 @@ async def test_migrate_restore_backup(ops_test: OpsTest, add_writes_to_shards) - # Destroy the old cluster and create a new cluster with the same exact topology and password await destroy_cluster_backup_test(ops_test) - await deploy_cluster_backup_test(ops_test, deploy_s3_integrator=False) - await setup_cluster_and_s3(ops_test) - config_leader_id = await get_leader_id(ops_test, app_name=CONFIG_SERVER_APP_NAME) + await deploy_cluster_backup_test(ops_test, deploy_s3_integrator=False, new_names=True) + await setup_cluster_and_s3(ops_test, new_names=True) + config_leader_id = await get_leader_id(ops_test, app_name=CONFIG_SERVER_APP_NAME_NEW) await set_password( ops_test, unit_id=config_leader_id, username="operator", 
password=OPERATOR_PASSWORD ) await ops_test.model.wait_for_idle( - apps=CLUSTER_APPS, status="active", idle_period=20, timeout=TIMEOUT + apps=CLUSTER_APPS_NEW, status="active", idle_period=20, timeout=TIMEOUT ) # find most recent backup id and restore leader_unit = await backup_helpers.get_leader_unit( - ops_test, db_app_name=CONFIG_SERVER_APP_NAME + ops_test, db_app_name=CONFIG_SERVER_APP_NAME_NEW ) - - # find most recent backup id and restore list_result = await backup_helpers.get_backup_list( - ops_test, db_app_name=CONFIG_SERVER_APP_NAME + ops_test, db_app_name=CONFIG_SERVER_APP_NAME_NEW ) most_recent_backup = list_result.split("\n")[-1] backup_id = most_recent_backup.split()[0] + action = await leader_unit.run_action(action_name="restore", **{"backup-id": backup_id}) + attempted_restore = await action.wait() + assert ( + attempted_restore.status == "failed" + ), "config-server ran restore without necessary remapping." + + remap_pattern = f"{CONFIG_SERVER_APP_NAME_NEW}={CONFIG_SERVER_APP_NAME},{SHARD_ONE_APP_NAME_NEW}={SHARD_ONE_APP_NAME},{SHARD_TWO_APP_NAME_NEW}={SHARD_TWO_APP_NAME}" + action = await leader_unit.run_action( + action_name="restore", **{"backup-id": backup_id, "remap-pattern": remap_pattern} + ) restore = await action.wait() assert restore.results["restore-status"] == "restore started", "restore not successful" await ops_test.model.wait_for_idle( - apps=[CONFIG_SERVER_APP_NAME], status="active", idle_period=20 + apps=[CONFIG_SERVER_APP_NAME_NEW], status="active", idle_period=20 ), - await verify_writes_restored(ops_test, cluster_writes) + await verify_writes_restored(ops_test, cluster_writes, new_names=True) -async def deploy_cluster_backup_test(ops_test: OpsTest, deploy_s3_integrator=True) -> None: +async def deploy_cluster_backup_test( + ops_test: OpsTest, deploy_s3_integrator=True, new_names=False +) -> None: """Deploy a cluster for the backup test.""" my_charm = await ops_test.build_charm(".") + + config_server_name = CONFIG_SERVER_APP_NAME if not new_names else CONFIG_SERVER_APP_NAME_NEW + shard_one_name = SHARD_ONE_APP_NAME if not new_names else SHARD_ONE_APP_NAME_NEW + shard_two_name = SHARD_TWO_APP_NAME if not new_names else SHARD_TWO_APP_NAME_NEW await ops_test.model.deploy( my_charm, num_units=2, config={"role": "config-server"}, - application_name=CONFIG_SERVER_APP_NAME, + application_name=config_server_name, ) await ops_test.model.deploy( - my_charm, num_units=2, config={"role": "shard"}, application_name=SHARD_ONE_APP_NAME + my_charm, num_units=2, config={"role": "shard"}, application_name=shard_one_name ) await ops_test.model.deploy( - my_charm, num_units=1, config={"role": "shard"}, application_name=SHARD_TWO_APP_NAME + my_charm, num_units=1, config={"role": "shard"}, application_name=shard_two_name ) # deploy the s3 integrator charm @@ -372,7 +390,7 @@ async def deploy_cluster_backup_test(ops_test: OpsTest, deploy_s3_integrator=Tru await ops_test.model.deploy(S3_APP_NAME, channel="edge") await ops_test.model.wait_for_idle( - apps=[S3_APP_NAME, CONFIG_SERVER_APP_NAME, SHARD_ONE_APP_NAME, SHARD_TWO_APP_NAME], + apps=[S3_APP_NAME, config_server_name, shard_one_name, shard_two_name], idle_period=20, raise_on_blocked=False, timeout=TIMEOUT, @@ -380,28 +398,32 @@ async def deploy_cluster_backup_test(ops_test: OpsTest, deploy_s3_integrator=Tru ) -async def setup_cluster_and_s3(ops_test: OpsTest) -> None: +async def setup_cluster_and_s3(ops_test: OpsTest, new_names=False) -> None: """Deploy a cluster for the backup test.""" + config_server_name = 
CONFIG_SERVER_APP_NAME if not new_names else CONFIG_SERVER_APP_NAME_NEW + shard_one_name = SHARD_ONE_APP_NAME if not new_names else SHARD_ONE_APP_NAME_NEW + shard_two_name = SHARD_TWO_APP_NAME if not new_names else SHARD_TWO_APP_NAME_NEW + # provide config-server to entire cluster and s3-integrator to config-server - integrations # made in succession to test race conditions. await ops_test.model.integrate( f"{S3_APP_NAME}:{S3_REL_NAME}", - f"{CONFIG_SERVER_APP_NAME}:{S3_REL_NAME}", + f"{config_server_name}:{S3_REL_NAME}", ) await ops_test.model.integrate( - f"{SHARD_ONE_APP_NAME}:{SHARD_REL_NAME}", - f"{CONFIG_SERVER_APP_NAME}:{CONFIG_SERVER_REL_NAME}", + f"{shard_one_name}:{SHARD_REL_NAME}", + f"{config_server_name}:{CONFIG_SERVER_REL_NAME}", ) await ops_test.model.integrate( - f"{SHARD_TWO_APP_NAME}:{SHARD_REL_NAME}", - f"{CONFIG_SERVER_APP_NAME}:{CONFIG_SERVER_REL_NAME}", + f"{shard_two_name}:{SHARD_REL_NAME}", + f"{config_server_name}:{CONFIG_SERVER_REL_NAME}", ) await ops_test.model.wait_for_idle( apps=[ - CONFIG_SERVER_APP_NAME, - SHARD_ONE_APP_NAME, - SHARD_TWO_APP_NAME, + config_server_name, + shard_one_name, + shard_two_name, ], idle_period=20, status="active", @@ -463,24 +485,30 @@ async def add_and_verify_unwanted_writes(ops_test, old_cluster_writes: Dict) -> ), "No writes to be cleared on shard-two after restoring." -async def verify_writes_restored(ops_test, exppected_cluster_writes: Dict) -> None: +async def verify_writes_restored( + ops_test, exppected_cluster_writes: Dict, new_names=False +) -> None: """Verify that writes were correctly restored.""" + shard_one_name = SHARD_ONE_APP_NAME if not new_names else SHARD_ONE_APP_NAME_NEW + shard_two_name = SHARD_TWO_APP_NAME if not new_names else SHARD_TWO_APP_NAME_NEW + shard_apps = [shard_one_name, shard_two_name] + # verify all writes are present for attempt in Retrying(stop=stop_after_delay(4), wait=wait_fixed(20), reraise=True): with attempt: restored_total_writes = await writes_helpers.get_cluster_writes_count( ops_test, - shard_app_names=SHARD_APPS, + shard_app_names=shard_apps, db_names=[SHARD_ONE_DB_NAME, SHARD_TWO_DB_NAME], ) assert ( restored_total_writes["total_writes"] == exppected_cluster_writes["total_writes"] ), "writes not correctly restored to whole cluster" assert ( - restored_total_writes[SHARD_ONE_APP_NAME] + restored_total_writes[shard_one_name] == exppected_cluster_writes[SHARD_ONE_APP_NAME] - ), f"writes not correctly restored to {SHARD_ONE_APP_NAME}" + ), f"writes not correctly restored to {shard_one_name}" assert ( - restored_total_writes[SHARD_TWO_APP_NAME] + restored_total_writes[shard_two_name] == exppected_cluster_writes[SHARD_TWO_APP_NAME] - ), f"writes not correctly restored to {SHARD_TWO_APP_NAME}" + ), f"writes not correctly restored to {shard_two_name}" diff --git a/tests/integration/sharding_tests/writes_helpers.py b/tests/integration/sharding_tests/writes_helpers.py index 51091c00c..b2aab9530 100644 --- a/tests/integration/sharding_tests/writes_helpers.py +++ b/tests/integration/sharding_tests/writes_helpers.py @@ -17,6 +17,7 @@ MONGOS_PORT = 27018 MONGOD_PORT = 27017 APP_NAME = "config-server" +APP_NAME_NEW = "config-server-new" logger = logging.getLogger(__name__) @@ -44,10 +45,10 @@ async def remove_db_writes( ops_test: OpsTest, db_name: str, coll_name: str, - config_server_name=APP_NAME, ) -> bool: """Stop the DB process and remove any writes to the test collection.""" # remove collection from database + config_server_name = APP_NAME if APP_NAME in ops_test.model.applications else 
APP_NAME_NEW connection_string = await mongos_uri(ops_test, config_server_name) client = MongoClient(connection_string) @@ -102,7 +103,6 @@ async def count_shard_writes( ops_test: OpsTest, shard_app_name=APP_NAME, db_name="new-db", collection_name="test_collection" ) -> int: """New versions of pymongo no longer support the count operation, instead find is used.""" - connection_string = await mongos_uri(ops_test, shard_app_name) password = await get_password(ops_test, app_name=shard_app_name) hosts = [ f"{unit.public_address}:{MONGOD_PORT}" diff --git a/tests/unit/test_mongodb_backups.py b/tests/unit/test_mongodb_backups.py index 621edda58..1da4d8fb7 100644 --- a/tests/unit/test_mongodb_backups.py +++ b/tests/unit/test_mongodb_backups.py @@ -447,7 +447,7 @@ def test_backup_list_syncing(self, pbm_command, service): self.harness.add_relation(RELATION_NAME, "s3-integrator") self.harness.charm.backups._on_list_backups_action(action_event) - action_event.defer.assert_called() + action_event.fail.assert_called() @patch("charm.MongodbOperatorCharm.has_backup_service") @patch("charm.MongodbOperatorCharm.run_pbm_command") @@ -561,7 +561,7 @@ def test_restore_syncing(self, pbm_command, service): self.harness.add_relation(RELATION_NAME, "s3-integrator") self.harness.charm.backups._on_restore_action(action_event) - action_event.defer.assert_called() + action_event.fail.assert_called() @patch("charm.MongodbOperatorCharm.has_backup_service") @patch("charm.MongodbOperatorCharm.run_pbm_command") @@ -600,14 +600,15 @@ def test_restore_wrong_cred(self, pbm_status, pbm_command, service): @patch("charm.MongodbOperatorCharm.has_backup_service") @patch("charm.MongodbOperatorCharm.run_pbm_command") @patch("charm.MongoDBBackups.get_pbm_status") - def test_restore_failed(self, pbm_status, pbm_command, service): + @patch("charm.MongoDBBackups._needs_provided_remap_arguments") + def test_restore_failed(self, remap, pbm_status, pbm_command, service): """Verifies restore is fails if the pbm command failed.""" action_event = mock.Mock() action_event.params = {"backup-id": "back-me-up"} pbm_status.return_value = ActiveStatus("") pbm_command.side_effect = ExecError( - command=["/usr/bin/pbm", "list"], exit_code=1, stdout="failed", stderr="" + command=["/usr/bin/pbm", "restore"], exit_code=1, stdout="failed", stderr="" ) self.harness.add_relation(RELATION_NAME, "s3-integrator") @@ -660,7 +661,7 @@ def test_remap_replicaset_remap_necessary(self, run_pbm_command): # first case is that the backup is not in the error state remap = self.harness.charm.backups._remap_replicaset("2002-02-14T13:59:14Z") - self.assertEqual(remap, "--replset-remapping current-app-name=old-cluster-name") + self.assertEqual(remap, "current-app-name=old-cluster-name") @patch("charm.MongodbOperatorCharm.has_backup_service") @patch("charm.MongodbOperatorCharm.run_pbm_command") @@ -695,7 +696,7 @@ def test_backup_syncing(self, run_pbm_command, service): self.harness.add_relation(RELATION_NAME, "s3-integrator") self.harness.charm.backups._on_create_backup_action(action_event) - action_event.defer.assert_called() + action_event.fail.assert_called() @patch("charm.MongodbOperatorCharm.has_backup_service") @patch("charm.MongodbOperatorCharm.run_pbm_command")
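
A minimal usage sketch for the new `remap-pattern` option, mirroring the restore flow this patch adds (`_restore_hook_checks`, `_restore`, and the `--replset-remapping` flag handed to PBM). The application names and backup id below are illustrative assumptions, not values taken from the patch; the pair order (`new_name=old_name`) follows `_remap_replicaset` and the integration test's `remap_pattern`. Assuming Juju 3.x action syntax, the same value would be passed roughly as `juju run config-server-new/leader restore backup-id=<backup-id> remap-pattern="config-server-new=config-server,shard-one-new=shard-one,shard-two-new=shard-two"` (hypothetical names).

```python
# Illustrative sketch (not charm code): how a remap-pattern value can be built
# and how the patched _restore() turns it into a PBM CLI invocation.
# Application names and the backup id are assumptions for the example.

from typing import Dict, List, Optional


def build_remap_pattern(new_to_old: Dict[str, str]) -> str:
    """Join new_name=old_name pairs, the order used by _remap_replicaset and the test."""
    return ",".join(f"{new}={old}" for new, old in new_to_old.items())


def build_restore_cmd(backup_id: str, remapping_args: Optional[str] = None) -> List[str]:
    """Mirror of the command assembly inside the patched _restore()."""
    if remapping_args:
        remapping_args = f"--replset-remapping {remapping_args}"
    restore_cmd = ["restore", backup_id]
    if remapping_args:
        restore_cmd = restore_cmd + remapping_args.split(" ")
    return restore_cmd


if __name__ == "__main__":
    pattern = build_remap_pattern(
        {
            "config-server-new": "config-server",
            "shard-one-new": "shard-one",
            "shard-two-new": "shard-two",
        }
    )
    # ['restore', '2024-03-21T12:00:00Z', '--replset-remapping',
    #  'config-server-new=config-server,shard-one-new=shard-one,shard-two-new=shard-two']
    print(build_restore_cmd("2024-03-21T12:00:00Z", remapping_args=pattern))
```

Keeping the user-supplied pattern free of the `--replset-remapping` prefix matches the patched `_restore()`, which prepends the flag itself, so the action parameter stays a plain `new_name=old_name[,new_name=old_name...]` list.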