Merge branch '6/edge' into stricter-tls
MiaAltieri authored Jan 18, 2024
2 parents 383fe98 + 678f839 commit 18d56d0
Showing 23 changed files with 1,251 additions and 726 deletions.
1 change: 0 additions & 1 deletion .github/workflows/ci.yaml
@@ -100,7 +100,6 @@ jobs:
with:
provider: lxd
juju-channel: 3.1/stable
bootstrap-options: "--agent-version 3.1.6"
- name: Download packed charm(s)
uses: actions/download-artifact@v3
with:
75 changes: 65 additions & 10 deletions lib/charms/mongodb/v0/config_server_interface.py
@@ -14,7 +14,7 @@
)
from charms.mongodb.v1.helpers import add_args_to_env, get_mongos_args
from charms.mongodb.v1.mongos import MongosConnection
from ops.charm import CharmBase, EventBase
from ops.charm import CharmBase, EventBase, RelationBrokenEvent
from ops.framework import Object
from ops.model import ActiveStatus, MaintenanceStatus, WaitingStatus

@@ -35,7 +35,7 @@

# Increment this PATCH version before using `charmcraft publish-lib` or reset
# to 0 if you are raising the major API version
LIBPATCH = 2
LIBPATCH = 5


class ClusterProvider(Object):
@@ -54,11 +54,21 @@ def __init__(
charm.on[self.relation_name].relation_changed, self._on_relation_changed
)

# TODO Future PRs handle scale down
# TODO Future PRs: propagate unit/password changes to mongos
self.framework.observe(
charm.on[self.relation_name].relation_departed,
self.charm.check_relation_broken_or_scale_down,
)
self.framework.observe(
charm.on[self.relation_name].relation_broken, self._on_relation_broken
)

def pass_hook_checks(self, event: EventBase) -> bool:
"""Runs the pre-hooks checks for ClusterProvider, returns True if all pass."""
if not self.charm.db_initialised:
logger.info("Deferring %s. db is not initialised.", type(event))
event.defer()
return False

if not self.charm.is_role(Config.Role.CONFIG_SERVER):
logger.info(
"Skipping %s. ShardingProvider is only be executed by config-server", type(event)
@@ -68,11 +78,6 @@ def pass_hook_checks(self, event: EventBase) -> bool:
if not self.charm.unit.is_leader():
return False

if not self.charm.db_initialised:
logger.info("Deferring %s. db is not initialised.", type(event))
event.defer()
return False

return True

def _on_relation_changed(self, event) -> None:
@@ -97,6 +102,26 @@ def _on_relation_changed(self, event) -> None:
},
)

def _on_relation_broken(self, event) -> None:
# Only relation_departed events can check if we are scaling down
departed_relation_id = event.relation.id
if not self.charm.has_departed_run(departed_relation_id):
logger.info(
"Deferring, must wait for relation departed hook to decide if relation should be removed."
)
event.defer()
return

if not self.pass_hook_checks(event):
logger.info("Skipping relation broken event: hook checks did not pass")
return

if not self.charm.proceed_on_broken_event(event):
logger.info("Skipping relation broken event, broken event due to scale down")
return

self.charm.client_relations.oversee_users(departed_relation_id, event)

def update_config_server_db(self, event):
"""Provides related mongos applications with new config server db."""
if not self.pass_hook_checks(event):
@@ -157,7 +182,13 @@ def __init__(
self.framework.observe(
charm.on[self.relation_name].relation_changed, self._on_relation_changed
)
# TODO Future PRs handle scale down
self.framework.observe(
charm.on[self.relation_name].relation_departed,
self.charm.check_relation_broken_or_scale_down,
)
self.framework.observe(
charm.on[self.relation_name].relation_broken, self._on_relation_broken
)

def _on_database_created(self, event) -> None:
if not self.charm.unit.is_leader():
@@ -202,6 +233,30 @@ def _on_relation_changed(self, event) -> None:

self.charm.unit.status = ActiveStatus()

def _on_relation_broken(self, event: RelationBrokenEvent) -> None:
# Only relation_departed events can check if we are scaling down
if not self.charm.has_departed_run(event.relation.id):
logger.info(
"Deferring, must wait for relation departed hook to decide if relation should be removed."
)
event.defer()
return

if not self.charm.proceed_on_broken_event(event):
logger.info("Skipping relation broken event, broken event due to scale down")
return

self.charm.stop_mongos_service()
logger.info("Stopped mongos daemon")

if not self.charm.unit.is_leader():
return

logger.info("Database and user removed for mongos application")
self.charm.remove_secret(Config.Relations.APP_SCOPE, Config.Secrets.USERNAME)
self.charm.remove_secret(Config.Relations.APP_SCOPE, Config.Secrets.PASSWORD)
self.charm.remove_connection_info()

# BEGIN: helper functions

def is_mongos_running(self) -> bool:
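Both handlers added here follow the same relation-broken guard sequence: defer until the relation-departed hook has recorded its verdict, skip cleanup when the break is caused by a scale-down, and only then remove users or secrets. A minimal runnable sketch of that flow, reusing the helper names from this diff but with a hypothetical stub charm in place of the real Juju model:

```python
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class StubCharm:
    """Hypothetical stand-in for the charm's scale-down bookkeeping."""

    def __init__(self):
        # rel_id -> True if the departing unit was this unit (a scale-down)
        self.departed: dict = {}

    def has_departed_run(self, rel_id: int) -> bool:
        return rel_id in self.departed

    def proceed_on_broken_event(self, rel_id: int) -> bool:
        return not self.departed[rel_id]


def on_relation_broken(charm: StubCharm, rel_id: int) -> None:
    if not charm.has_departed_run(rel_id):
        # the real handler calls event.defer() and retries later
        logger.info("Waiting for relation departed hook to run first.")
        return
    if not charm.proceed_on_broken_event(rel_id):
        logger.info("Broken event due to scale down, keeping users.")
        return
    logger.info("Relation removed for real: cleaning up users and secrets.")


charm = StubCharm()
on_relation_broken(charm, 7)   # departed hook has not run yet -> wait
charm.departed[7] = False      # departed ran and it was not a scale-down
on_relation_broken(charm, 7)   # now proceeds to cleanup
```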
18 changes: 16 additions & 2 deletions lib/charms/mongodb/v0/mongodb_secrets.py
@@ -6,7 +6,7 @@

from ops import Secret, SecretInfo
from ops.charm import CharmBase
from ops.model import SecretNotFoundError
from ops.model import ModelError, SecretNotFoundError

from config import Config
from exceptions import SecretAlreadyExistsError
@@ -93,7 +93,21 @@ def get_content(self) -> Dict[str, str]:
"""Getting cached secret content."""
if not self._secret_content:
if self.meta:
self._secret_content = self.meta.get_content()
try:
self._secret_content = self.meta.get_content(refresh=True)
except (ValueError, ModelError) as err:
# https://bugs.launchpad.net/juju/+bug/2042596
# Only triggered when 'refresh' is set
known_model_errors = [
"ERROR either URI or label should be used for getting an owned secret but not both",
"ERROR secret owner cannot use --refresh",
]
if isinstance(err, ModelError) and not any(
msg in str(err) for msg in known_model_errors
):
raise
# Due to: ValueError: Secret owner cannot use refresh=True
self._secret_content = self.meta.get_content()
return self._secret_content

def set_content(self, content: Dict[str, str]) -> None:
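The try/except added to `get_content` works around https://bugs.launchpad.net/juju/+bug/2042596, where the owner of a secret cannot read it with `refresh=True`. A condensed sketch of the same fallback, assuming an `ops.model.Secret` instance and the known error strings from this diff:

```python
from typing import Dict

from ops.model import ModelError, Secret

KNOWN_MODEL_ERRORS = (
    "ERROR either URI or label should be used for getting an owned secret but not both",
    "ERROR secret owner cannot use --refresh",
)


def get_secret_content(secret: Secret) -> Dict[str, str]:
    try:
        # observers need refresh=True to see the latest secret revision
        return secret.get_content(refresh=True)
    except (ValueError, ModelError) as err:
        # re-raise anything other than the known "owner cannot refresh" cases
        if isinstance(err, ModelError) and not any(
            msg in str(err) for msg in KNOWN_MODEL_ERRORS
        ):
            raise
        # the owner already holds the latest revision; read without refresh
        return secret.get_content()
```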
17 changes: 14 additions & 3 deletions lib/charms/mongodb/v1/helpers.py
@@ -44,11 +44,22 @@
MONGO_SHELL = "charmed-mongodb.mongosh"

DATA_DIR = "/var/lib/mongodb"
LOG_DIR = "/var/log/mongodb"
LOG_TO_SYSLOG = True
CONF_DIR = "/etc/mongod"
MONGODB_LOG_FILENAME = "mongodb.log"
logger = logging.getLogger(__name__)


def _get_logging_options(snap_install: bool) -> str:
# TODO: send logs to syslog until we have a separate mount point for logs
if LOG_TO_SYSLOG:
return ""
# in k8s the default logging options that are used for the vm charm are ignored and logs are
# the output of the container. To enable logging to a file it must be set explicitly
return f"--logpath={LOG_DIR}/{MONGODB_LOG_FILENAME}" if snap_install else ""


# noinspection GrazieInspection
def get_create_user_cmd(config: MongoDBConfiguration, mongo_path=MONGO_SHELL) -> List[str]:
"""Creates initial admin user for MongoDB.
@@ -130,9 +141,7 @@ def get_mongod_args(
"""
full_data_dir = f"{MONGODB_COMMON_DIR}{DATA_DIR}" if snap_install else DATA_DIR
full_conf_dir = f"{MONGODB_SNAP_DATA_DIR}{CONF_DIR}" if snap_install else CONF_DIR
# in k8s the default logging options that are used for the vm charm are ignored and logs are
# the output of the container. To enable logging to a file it must be set explicitly
logging_options = "" if snap_install else f"--logpath={full_data_dir}/{MONGODB_LOG_FILENAME}"
logging_options = _get_logging_options(snap_install)
cmd = [
# bind to localhost and external interfaces
"--bind_ip_all",
@@ -143,6 +152,8 @@
# for simplicity we run the mongod daemon on shards, configsvrs, and replicas on the same
# port
f"--port={Config.MONGODB_PORT}",
"--auditDestination=syslog", # TODO sending logs to syslog until we have a separate mount point for logs
f"--auditFormat={Config.AuditLog.FORMAT}",
logging_options,
]
if auth:
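Net effect of the helpers.py changes: the server log keeps its default destination while `LOG_TO_SYSLOG` is true, and mongod's audit log is routed to syslog explicitly. A runnable sketch of how the pieces compose; `AUDIT_FORMAT` and `MONGODB_PORT` are stand-ins for `Config.AuditLog.FORMAT` and `Config.MONGODB_PORT`:

```python
from typing import List

LOG_DIR = "/var/log/mongodb"
MONGODB_LOG_FILENAME = "mongodb.log"
LOG_TO_SYSLOG = True
AUDIT_FORMAT = "JSON"  # stand-in for Config.AuditLog.FORMAT
MONGODB_PORT = 27017   # stand-in for Config.MONGODB_PORT


def get_logging_options(snap_install: bool) -> str:
    if LOG_TO_SYSLOG:
        return ""  # no --logpath while logs are sent to syslog
    # on k8s the vm charm's default logging options are ignored and logs are
    # the container output, so a file path must be set explicitly
    return f"--logpath={LOG_DIR}/{MONGODB_LOG_FILENAME}" if snap_install else ""


def build_mongod_args(snap_install: bool) -> List[str]:
    args = [
        "--bind_ip_all",
        f"--port={MONGODB_PORT}",
        "--auditDestination=syslog",     # audit events go to syslog
        f"--auditFormat={AUDIT_FORMAT}",
    ]
    if logging_options := get_logging_options(snap_install):
        args.append(logging_options)
    return args


print(" ".join(build_mongod_args(snap_install=True)))
```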
12 changes: 6 additions & 6 deletions lib/charms/mongodb/v1/mongodb_provider.py
@@ -31,7 +31,7 @@

# Increment this PATCH version before using `charmcraft publish-lib` or reset
# to 0 if you are raising the major API version
LIBPATCH = 3
LIBPATCH = 4

logger = logging.getLogger(__name__)
REL_NAME = "database"
@@ -87,6 +87,11 @@ def __init__(self, charm: CharmBase, substrate="k8s", relation_name: str = "data

def pass_hook_checks(self) -> bool:
"""Runs the pre-hooks checks for MongoDBProvider, returns True if all pass."""
# We shouldn't try to create or update users if the database is not
# initialised. We will create users as part of initialisation.
if not self.charm.db_initialised:
return False

if not self.charm.is_relation_feasible(self.relation_name):
logger.info("Skipping code for relations.")
return False
@@ -100,11 +105,6 @@ def pass_hook_checks(self) -> bool:
if not self.charm.unit.is_leader():
return False

# We shouldn't try to create or update users if the database is not
# initialised. We will create users as part of initialisation.
if not self.charm.db_initialised:
return False

return True

def _on_relation_event(self, event):
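The only change in this library is guard ordering: `db_initialised` is now checked before leadership, so the "database not yet initialised" case is handled uniformly on every unit rather than being masked by the earlier non-leader return. A minimal sketch of the resulting order, with hypothetical stub objects:

```python
class StubUnit:
    def __init__(self, leader: bool):
        self._leader = leader

    def is_leader(self) -> bool:
        return self._leader


class StubCharm:
    def __init__(self, db_initialised: bool, leader: bool):
        self.db_initialised = db_initialised
        self.unit = StubUnit(leader)


def pass_hook_checks(charm: StubCharm) -> bool:
    # users are created as part of initialisation, so there is nothing to
    # create or update before the database exists
    if not charm.db_initialised:
        return False
    # only the leader manages client users
    if not charm.unit.is_leader():
        return False
    return True


assert pass_hook_checks(StubCharm(db_initialised=False, leader=True)) is False
assert pass_hook_checks(StubCharm(db_initialised=True, leader=False)) is False
assert pass_hook_checks(StubCharm(db_initialised=True, leader=True)) is True
```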
57 changes: 22 additions & 35 deletions lib/charms/mongodb/v1/shards_interface.py
@@ -51,7 +51,7 @@

# Increment this PATCH version before using `charmcraft publish-lib` or reset
# to 0 if you are raising the major API version
LIBPATCH = 2
LIBPATCH = 5
KEYFILE_KEY = "key-file"
HOSTS_KEY = "host"
OPERATOR_PASSWORD_KEY = MongoDBUser.get_password_key_name_for_user(OperatorUser.get_username())
@@ -121,6 +121,11 @@ def _on_relation_joined(self, event):

def pass_hook_checks(self, event: EventBase) -> bool:
"""Runs the pre-hooks checks for ShardingProvider, returns True if all pass."""
if not self.charm.db_initialised:
logger.info("Deferring %s. db is not initialised.", type(event))
event.defer()
return False

if not self.charm.is_relation_feasible(self.relation_name):
logger.info("Skipping event %s , relation not feasible.", type(event))
return False
@@ -134,41 +139,25 @@ def pass_hook_checks(self, event: EventBase) -> bool:
if not self.charm.unit.is_leader():
return False

if not self.charm.db_initialised:
logger.info("Deferring %s. db is not initialised.", type(event))
event.defer()
return False

# adding/removing shards while a backup/restore is in progress can be disastrous
pbm_status = self.charm.backups.get_pbm_status()
if isinstance(pbm_status, MaintenanceStatus):
logger.info("Cannot add/remove shards while a backup/restore is in progress.")
event.defer()
return False

return True

def _proceed_on_broken_event(self, event) -> int:
"""Returns relation_id if relation broken event occurred due to a removed relation."""
departed_relation_id = None

# Only relation_departed events can check if we are scaling down
departed_relation_id = event.relation.id
if not self.charm.has_departed_run(departed_relation_id):
logger.info(
"Deferring, must wait for relation departed hook to decide if relation should be removed."
)
event.defer()
return
if isinstance(event, RelationBrokenEvent):
if not self.charm.has_departed_run(event.relation.id):
logger.info(
"Deferring, must wait for relation departed hook to decide if relation should be removed."
)
event.defer()
return False

# check if we're scaling down and add a log message
if self.charm.is_scaling_down(event.relation.id):
logger.info(
"Relation broken event occurring due to scale down, do not proceed to remove users."
)
return
if not self.charm.proceed_on_broken_event(event):
return False

return departed_relation_id
return True

def _on_relation_event(self, event):
"""Handles adding and removing of shards.
@@ -181,9 +170,7 @@ def _on_relation_event(self, event):

departed_relation_id = None
if isinstance(event, RelationBrokenEvent):
departed_relation_id = self._proceed_on_broken_event(event)
if not departed_relation_id:
return
departed_relation_id = event.relation.id

try:
logger.info("Adding/Removing shards not present in cluster.")
@@ -520,6 +507,11 @@ def _on_relation_changed(self, event):

def pass_hook_checks(self, event):
"""Runs the pre-hooks checks for ConfigServerRequirer, returns True if all pass."""
if not self.charm.db_initialised:
logger.info("Deferring %s. db is not initialised.", type(event))
event.defer()
return False

if not self.charm.is_relation_feasible(self.relation_name):
logger.info("Skipping event %s , relation not feasible.", type(event))
return False
@@ -528,11 +520,6 @@ def pass_hook_checks(self, event):
logger.info("skipping %s is only be executed by shards", type(event))
return False

if not self.charm.db_initialised:
logger.info("Deferring %s. db is not initialised.", type(event))
event.defer()
return False

mongos_hosts = event.relation.data[event.relation.app].get(HOSTS_KEY, None)
if isinstance(event, RelationBrokenEvent) and not mongos_hosts:
logger.info("Config-server relation never set up, no need to process broken event.")
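The deleted `_proceed_on_broken_event` lived only on the sharding provider; its logic now sits on the charm as `proceed_on_broken_event` (see `src/charm.py` below), so the cluster and sharding interfaces share one implementation. A runnable sketch of the shared bookkeeping, with the unit peer databag reduced to a plain dict and the key format purely illustrative:

```python
import json


class ScaleDownTracker:
    """Hypothetical stand-in for the charm's peer-databag bookkeeping."""

    def __init__(self):
        self.unit_peer_data: dict = {}  # databag values must be strings

    @staticmethod
    def _key(rel_id: int) -> str:
        return f"relation_{rel_id}_departed"  # illustrative key format

    def set_scaling_down(self, rel_id: int, departing_is_self: bool) -> bool:
        # written by the relation-departed hook
        self.unit_peer_data[self._key(rel_id)] = json.dumps(departing_is_self)
        return departing_is_self

    def has_departed_run(self, rel_id: int) -> bool:
        return self._key(rel_id) in self.unit_peer_data

    def is_scaling_down(self, rel_id: int) -> bool:
        return json.loads(self.unit_peer_data[self._key(rel_id)])


def proceed_on_broken_event(tracker: ScaleDownTracker, rel_id: int) -> bool:
    if not tracker.has_departed_run(rel_id):
        return False  # the real code defers the event here and retries
    if tracker.is_scaling_down(rel_id):
        return False  # relation broken by scale-down: keep users
    return True


tracker = ScaleDownTracker()
tracker.set_scaling_down(3, departing_is_self=False)
assert proceed_on_broken_event(tracker, 3) is True
```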
24 changes: 22 additions & 2 deletions src/charm.py
@@ -1319,10 +1319,30 @@ def set_scaling_down(self, event: RelationDepartedEvent) -> bool:
# check if relation departed is due to current unit being removed. (i.e. scaling down the
# application.)
rel_departed_key = self._generate_relation_departed_key(event.relation.id)
scaling_down = json.dumps(event.departing_unit == self.unit)
self.unit_peer_data[rel_departed_key] = scaling_down
scaling_down = event.departing_unit == self.unit
self.unit_peer_data[rel_departed_key] = json.dumps(scaling_down)
return scaling_down

def proceed_on_broken_event(self, event) -> bool:
"""Returns relation_id if relation broken event occurred due to a removed relation."""
# Only relation_departed events can check if we are scaling down
departed_relation_id = event.relation.id
if not self.has_departed_run(departed_relation_id):
logger.info(
"Deferring, must wait for relation departed hook to decide if relation should be removed."
)
event.defer()
return False

# check if we're scaling down and add a log message
if self.is_scaling_down(departed_relation_id):
logger.info(
"Relation broken event occurring due to scale down, do not proceed to remove users."
)
return False

return True

@staticmethod
def _generate_relation_departed_key(rel_id: int) -> str:
"""Generates the relation departed key for a specified relation id."""
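The one-line change in `set_scaling_down` fixes a truthiness bug: the json-encoded *string* used to be both stored and returned, and `json.dumps(False)` is the non-empty, hence truthy, string `"false"`. A runnable demonstration of the before/after behaviour:

```python
import json

# Before the fix: the string was returned, so callers doing
# `if scaling_down:` saw True even when another unit was departing.
buggy = json.dumps(False)
assert buggy == "false"
assert bool(buggy) is True  # non-empty string: wrong answer

# After the fix: the bool is returned directly; only the peer-databag
# copy is serialized, since Juju relation data values must be strings.
scaling_down = False
stored = json.dumps(scaling_down)
assert bool(scaling_down) is False
assert json.loads(stored) is False
```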