Skip to content

Commit

Permalink
[DPE-3182] mongodb remove user (#334)
Browse files Browse the repository at this point in the history
## Issue
When relation is broken between `mongos` charm and `config-server` charm
the user created for the `mongos` application is still accessible

## Solution
When the relation is broken between the `mongos` charm and the `config-server` charm,
remove the users that were created for the `mongos` application.

## Tested by hand
```
 # deploy both charms
cd ~/mongos-operator
# juju add-model test-0
tox -e build
juju deploy ./*charm

cd ~/mongos-operator/tests/integration/application
charmcraft pack 
juju deploy ./*charm -n2

# get shared cluster up and running
cd ~/mongodb-operator
charmcraft pack
juju deploy ./*charm --config role="config-server" config-server -n2
juju deploy ./*charm --config role="shard" shard

# integrate
juju integrate mongos application
juju integrate config-server:config-server shard:sharding
juju integrate config-server:cluster mongos:cluster

# get URI
juju show-unit application/0
juju show-secret --reveal <secret URI>

# access user
juju ssh application/0
sudo charmed-mongodb.mongosh <URI>

# scale application and repeat access user step
juju remove-unit config-server/1
juju remove-unit application/0

# remove relation and repeat access user step (except this time verify we cannot access the user)
juju remove-relation  config-server:cluster mongos:cluster
```

## Future PRs
1. [user int tests](https://warthogs.atlassian.net/browse/DPE-3292)
2. [have mongos sub-charm remove connection info for host application]()
  • Loading branch information
MiaAltieri authored Jan 12, 2024
1 parent 1048c2a commit 7d7e80f
Show file tree
Hide file tree
Showing 3 changed files with 55 additions and 29 deletions.
32 changes: 29 additions & 3 deletions lib/charms/mongodb/v0/config_server_interface.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@

# Increment this PATCH version before using `charmcraft publish-lib` or reset
# to 0 if you are raising the major API version
LIBPATCH = 2
LIBPATCH = 3


class ClusterProvider(Object):
Expand All @@ -54,8 +54,13 @@ def __init__(
charm.on[self.relation_name].relation_changed, self._on_relation_changed
)

# TODO Future PRs handle scale down
# TODO Future PRs handle changing of units/passwords to be propagated to mongos
self.framework.observe(
charm.on[self.relation_name].relation_departed,
self.charm.check_relation_broken_or_scale_down,
)
self.framework.observe(
charm.on[self.relation_name].relation_broken, self._on_relation_broken
)

def pass_hook_checks(self, event: EventBase) -> bool:
"""Runs the pre-hooks checks for ClusterProvider, returns True if all pass."""
Expand Down Expand Up @@ -97,6 +102,26 @@ def _on_relation_changed(self, event) -> None:
},
)

def _on_relation_broken(self, event) -> None:
    """Removes the users created for the departing mongos application.

    Defers until the relation-departed hook has recorded whether this broken
    event is due to a unit scale-down; only when the relation itself was
    removed does it proceed to remove the related application's users.
    """
    # Only relation-departed events can check if we are scaling down, so the
    # departed hook must have run (and recorded its verdict) before we act.
    departed_relation_id = event.relation.id
    if not self.charm.has_departed_run(departed_relation_id):
        logger.info(
            "Deferring, must wait for relation departed hook to decide if relation should be removed."
        )
        event.defer()
        return

    if not self.pass_hook_checks(event):
        logger.info("Skipping relation broken event: hook checks did not pass")
        return

    # A falsy result means the broken event was caused by a scale-down, in
    # which case the relation users must be left intact.
    if not self.charm.proceed_on_broken_event(event):
        logger.info("Skipping relation broken event, broken event due to scale down")
        return

    # Relation truly removed: revoke the users created for this relation.
    self.charm.client_relations.oversee_users(departed_relation_id, event)

def update_config_server_db(self, event):
"""Provides related mongos applications with new config server db."""
if not self.pass_hook_checks(event):
Expand Down Expand Up @@ -157,6 +182,7 @@ def __init__(
self.framework.observe(
charm.on[self.relation_name].relation_changed, self._on_relation_changed
)

# TODO Future PRs handle scale down

def _on_database_created(self, event) -> None:
Expand Down
26 changes: 2 additions & 24 deletions lib/charms/mongodb/v1/shards_interface.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@

# Increment this PATCH version before using `charmcraft publish-lib` or reset
# to 0 if you are raising the major API version
LIBPATCH = 2
LIBPATCH = 3
KEYFILE_KEY = "key-file"
HOSTS_KEY = "host"
OPERATOR_PASSWORD_KEY = MongoDBUser.get_password_key_name_for_user(OperatorUser.get_username())
Expand Down Expand Up @@ -148,28 +148,6 @@ def pass_hook_checks(self, event: EventBase) -> bool:

return True

def _proceed_on_broken_event(self, event) -> int:
"""Returns relation_id if relation broken event occurred due to a removed relation."""
departed_relation_id = None

# Only relation_deparated events can check if scaling down
departed_relation_id = event.relation.id
if not self.charm.has_departed_run(departed_relation_id):
logger.info(
"Deferring, must wait for relation departed hook to decide if relation should be removed."
)
event.defer()
return

# check if were scaling down and add a log message
if self.charm.is_scaling_down(event.relation.id):
logger.info(
"Relation broken event occurring due to scale down, do not proceed to remove users."
)
return

return departed_relation_id

def _on_relation_event(self, event):
"""Handles adding and removing of shards.
Expand All @@ -181,7 +159,7 @@ def _on_relation_event(self, event):

departed_relation_id = None
if isinstance(event, RelationBrokenEvent):
departed_relation_id = self._proceed_on_broken_event(event)
departed_relation_id = self.charm.proceed_on_broken_event(event)
if not departed_relation_id:
return

Expand Down
26 changes: 24 additions & 2 deletions src/charm.py
Original file line number Diff line number Diff line change
Expand Up @@ -1319,10 +1319,32 @@ def set_scaling_down(self, event: RelationDepartedEvent) -> bool:
# check if relation departed is due to current unit being removed. (i.e. scaling down the
# application.)
rel_departed_key = self._generate_relation_departed_key(event.relation.id)
scaling_down = json.dumps(event.departing_unit == self.unit)
self.unit_peer_data[rel_departed_key] = scaling_down
scaling_down = event.departing_unit == self.unit
self.unit_peer_data[rel_departed_key] = json.dumps(scaling_down)
return scaling_down

def proceed_on_broken_event(self, event) -> "int | None":
    """Returns the relation id if the broken event is due to a removed relation.

    Args:
        event: the relation-broken event being processed; its relation id is
            used to look up state recorded by the relation-departed hook.

    Returns:
        The departed relation's id when the relation was actually removed and
        it is safe to proceed (e.g. to remove users). Returns ``None`` — after
        deferring the event — when the relation-departed hook has not run yet,
        or — without deferring — when the broken event was caused by the
        current unit scaling down.
    """
    departed_relation_id = event.relation.id

    # Only relation-departed events can check if we are scaling down; wait
    # for that hook to have recorded its verdict before deciding.
    if not self.has_departed_run(departed_relation_id):
        logger.info(
            "Deferring, must wait for relation departed hook to decide if relation should be removed."
        )
        event.defer()
        return None

    # Check if we are scaling down and add a log message.
    if self.is_scaling_down(departed_relation_id):
        logger.info(
            "Relation broken event occurring due to scale down, do not proceed to remove users."
        )
        return None

    return departed_relation_id

@staticmethod
def _generate_relation_departed_key(rel_id: int) -> str:
"""Generates the relation departed key for a specified relation id."""
Expand Down

0 comments on commit 7d7e80f

Please sign in to comment.