From 7d7e80f804de08effebf43f97d4f4ccbe16c2f0f Mon Sep 17 00:00:00 2001
From: Mia Altieri <32723809+MiaAltieri@users.noreply.github.com>
Date: Fri, 12 Jan 2024 10:14:57 -0800
Subject: [PATCH] [DPE-3182] mongodb remove user (#334)

## Issue
When the relation between the `mongos` charm and the `config-server` charm is
broken, the user created for the `mongos` application is still accessible.

## Solution
When the relation between the `mongos` charm and the `config-server` charm is
broken, remove the users created for the `mongos` application, unless the
broken event was caused by a scale-down.
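In outline, the new broken-event handling works like this (a condensed sketch
that inlines `proceed_on_broken_event` into `_on_relation_broken` and omits the
`pass_hook_checks` guard; see the diff below for the exact code):

```python
def _on_relation_broken(self, event) -> None:
    # relation_broken alone cannot tell a removed relation from a scale-down;
    # that decision is recorded beforehand by the relation_departed hook.
    if not self.charm.has_departed_run(event.relation.id):
        event.defer()  # relation_departed has not run yet, retry later
        return

    if self.charm.is_scaling_down(event.relation.id):
        return  # scale-down: the mongos user must stay accessible

    # genuine relation removal: drop the users created for this relation
    self.charm.client_relations.oversee_users(event.relation.id, event)
```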
## Tested by hand
```
# deploy both charms
cd ~/mongos-operator
# juju add-model test-0
tox -e build
juju deploy ./*charm
cd ~/mongos-operator/tests/integration/application
charmcraft pack
juju deploy ./*charm -n2

# get sharded cluster up and running
cd ~/mongodb-operator
charmcraft pack
juju deploy ./*charm --config role="config-server" config-server -n2
juju deploy ./*charm --config role="shard" shard

# integrate
juju integrate mongos application
juju integrate config-server:config-server shard:sharding
juju integrate config-server:cluster mongos:cluster

# get URI
juju show-unit application/0
juju show-secret --reveal

# access user
juju ssh application/0 sudo charmed-mongodb.mongosh

# scale application and repeat access user step
juju remove-unit config-server/1
juju remove-unit application/0

# remove relation and repeat access user step (except this time verify we cannot access the user)
juju remove-relation config-server:cluster mongos:cluster
```
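The "access user" steps can also be scripted instead of run interactively in
`mongosh`. A minimal sketch, assuming `pymongo` is installed on the test
machine and using a placeholder URI (fill it in from the secret revealed by
`juju show-secret --reveal`):

```python
from pymongo import MongoClient
from pymongo.errors import OperationFailure

# Placeholder URI: substitute the credentials from the revealed secret.
URI = "mongodb://<username>:<password>@<host>:27017/admin"

client = MongoClient(URI, serverSelectionTimeoutMS=5000)
try:
    # Any command that requires authentication works as a probe.
    client.admin.command("usersInfo")
    print("user is still accessible")
except OperationFailure:
    # Expected once the cluster relation is removed and the user dropped.
    print("user is gone: authentication failed")
```

Before `remove-relation` this should report the user as accessible; afterwards
the probe should fail authentication.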
## Future PRs
1. [user int tests](https://warthogs.atlassian.net/browse/DPE-3292)
2. [have mongos sub-charm remove connection info for host application]()
---
 .../mongodb/v0/config_server_interface.py     | 32 +++++++++++++++++--
 lib/charms/mongodb/v1/shards_interface.py     | 26 ++-------------
 src/charm.py                                  | 26 +++++++++++++--
 3 files changed, 55 insertions(+), 29 deletions(-)

diff --git a/lib/charms/mongodb/v0/config_server_interface.py b/lib/charms/mongodb/v0/config_server_interface.py
index 65242d9b3..47100d71a 100644
--- a/lib/charms/mongodb/v0/config_server_interface.py
+++ b/lib/charms/mongodb/v0/config_server_interface.py
@@ -35,7 +35,7 @@

 # Increment this PATCH version before using `charmcraft publish-lib` or reset
 # to 0 if you are raising the major API version
-LIBPATCH = 2
+LIBPATCH = 3


 class ClusterProvider(Object):
@@ -54,8 +54,13 @@ def __init__(
             charm.on[self.relation_name].relation_changed, self._on_relation_changed
         )

-        # TODO Future PRs handle scale down
-        # TODO Future PRs handle changing of units/passwords to be propagated to mongos
+        self.framework.observe(
+            charm.on[self.relation_name].relation_departed,
+            self.charm.check_relation_broken_or_scale_down,
+        )
+        self.framework.observe(
+            charm.on[self.relation_name].relation_broken, self._on_relation_broken
+        )

     def pass_hook_checks(self, event: EventBase) -> bool:
         """Runs the pre-hooks checks for ClusterProvider, returns True if all pass."""
@@ -97,6 +102,26 @@ def _on_relation_changed(self, event) -> None:
             },
         )

+    def _on_relation_broken(self, event) -> None:
+        # Only relation_departed events can check if scaling down
+        departed_relation_id = event.relation.id
+        if not self.charm.has_departed_run(departed_relation_id):
+            logger.info(
+                "Deferring, must wait for relation departed hook to decide if relation should be removed."
+            )
+            event.defer()
+            return
+
+        if not self.pass_hook_checks(event):
+            logger.info("Skipping relation broken event: hook checks did not pass")
+            return
+
+        if not self.charm.proceed_on_broken_event(event):
+            logger.info("Skipping relation broken event, broken event due to scale down")
+            return
+
+        self.charm.client_relations.oversee_users(departed_relation_id, event)
+
     def update_config_server_db(self, event):
         """Provides related mongos applications with new config server db."""
         if not self.pass_hook_checks(event):
@@ -157,6 +182,7 @@ def __init__(
         self.framework.observe(
             charm.on[self.relation_name].relation_changed, self._on_relation_changed
         )
+        # TODO Future PRs handle scale down

     def _on_database_created(self, event) -> None:

diff --git a/lib/charms/mongodb/v1/shards_interface.py b/lib/charms/mongodb/v1/shards_interface.py
index da7cc8320..4f62b418e 100644
--- a/lib/charms/mongodb/v1/shards_interface.py
+++ b/lib/charms/mongodb/v1/shards_interface.py
@@ -51,7 +51,7 @@
 # Increment this PATCH version before using `charmcraft publish-lib` or reset
 # to 0 if you are raising the major API version
-LIBPATCH = 2
+LIBPATCH = 3

 KEYFILE_KEY = "key-file"
 HOSTS_KEY = "host"
 OPERATOR_PASSWORD_KEY = MongoDBUser.get_password_key_name_for_user(OperatorUser.get_username())
@@ -148,28 +148,6 @@ def pass_hook_checks(self, event: EventBase) -> bool:

         return True

-    def _proceed_on_broken_event(self, event) -> int:
-        """Returns relation_id if relation broken event occurred due to a removed relation."""
-        departed_relation_id = None
-
-        # Only relation_deparated events can check if scaling down
-        departed_relation_id = event.relation.id
-        if not self.charm.has_departed_run(departed_relation_id):
-            logger.info(
-                "Deferring, must wait for relation departed hook to decide if relation should be removed."
-            )
-            event.defer()
-            return
-
-        # check if were scaling down and add a log message
-        if self.charm.is_scaling_down(event.relation.id):
-            logger.info(
-                "Relation broken event occurring due to scale down, do not proceed to remove users."
-            )
-            return
-
-        return departed_relation_id
-
     def _on_relation_event(self, event):
         """Handles adding and removing of shards.

@@ -181,7 +159,7 @@
         departed_relation_id = None
         if isinstance(event, RelationBrokenEvent):
-            departed_relation_id = self._proceed_on_broken_event(event)
+            departed_relation_id = self.charm.proceed_on_broken_event(event)
             if not departed_relation_id:
                 return

diff --git a/src/charm.py b/src/charm.py
index 288f5b953..e8cd654ad 100755
--- a/src/charm.py
+++ b/src/charm.py
@@ -1319,10 +1319,32 @@ def set_scaling_down(self, event: RelationDepartedEvent) -> bool:
         # check if relation departed is due to current unit being removed. (i.e. scaling down the
         # application.)
         rel_departed_key = self._generate_relation_departed_key(event.relation.id)
-        scaling_down = json.dumps(event.departing_unit == self.unit)
-        self.unit_peer_data[rel_departed_key] = scaling_down
+        scaling_down = event.departing_unit == self.unit
+        self.unit_peer_data[rel_departed_key] = json.dumps(scaling_down)
         return scaling_down

+    def proceed_on_broken_event(self, event) -> int:
+        """Returns relation_id if relation broken event occurred due to a removed relation."""
+        departed_relation_id = None
+
+        # Only relation_departed events can check if scaling down
+        departed_relation_id = event.relation.id
+        if not self.has_departed_run(departed_relation_id):
+            logger.info(
+                "Deferring, must wait for relation departed hook to decide if relation should be removed."
+            )
+            event.defer()
+            return
+
+        # check if we're scaling down and add a log message
+        if self.is_scaling_down(departed_relation_id):
+            logger.info(
+                "Relation broken event occurring due to scale down, do not proceed to remove users."
+            )
+            return
+
+        return departed_relation_id
+
     @staticmethod
     def _generate_relation_departed_key(rel_id: int) -> str:
         """Generates the relation departed key for a specified relation id."""