diff --git a/src/charm.py b/src/charm.py index 15619fc55..0a275a680 100755 --- a/src/charm.py +++ b/src/charm.py @@ -53,6 +53,7 @@ UNIX_USER = "mongodb" UNIX_GROUP = "mongodb" MONGODB_EXPORTER_PORT = 9216 +REL_NAME = "database" class MongoDBCharm(CharmBase): @@ -253,6 +254,9 @@ def _reconfigure(self, event) -> None: event.defer() return mongo.add_replset_member(member) + + # app relations should be made aware of the new set of hosts + self._update_app_relation_data(mongo.get_users()) except NotReadyError: logger.info("Deferring reconfigure: another member doing sync right now") event.defer() @@ -260,6 +264,20 @@ def _reconfigure(self, event) -> None: logger.info("Deferring reconfigure: error=%r", e) event.defer() + def _update_app_relation_data(self, database_users): + """Helper function to update application relation data.""" + for relation in self.model.relations[REL_NAME]: + username = self.client_relations._get_username_from_relation_id(relation.id) + password = relation.data[self.app]["password"] + if username in database_users: + config = self.client_relations._get_config(username, password) + relation.data[self.app].update( + { + "endpoints": ",".join(config.hosts), + "uris": config.uri, + } + ) + @property def _mongodb_exporter_layer(self) -> Layer: """Returns a Pebble configuration layer for mongod.""" diff --git a/tests/integration/ha_tests/helpers.py b/tests/integration/ha_tests/helpers.py index ef97192d0..18c86a1b4 100644 --- a/tests/integration/ha_tests/helpers.py +++ b/tests/integration/ha_tests/helpers.py @@ -1,6 +1,7 @@ # Copyright 2023 Canonical Ltd. # See LICENSE file for licensing details. +import logging import os import string import subprocess @@ -34,9 +35,11 @@ TIMEOUT = 15 * 60 TEST_DB = "continuous_writes_database" TEST_COLLECTION = "test_collection" -ANOTHER_DATABASE_APP_NAME = "another-database-a" +ANOTHER_DATABASE_APP_NAME = "another-database" EXCLUDED_APPS = [ANOTHER_DATABASE_APP_NAME] +logger = logging.getLogger(__name__) + mongodb_charm, application_charm = None, None @@ -311,17 +314,25 @@ async def mongod_ready(ops_test: OpsTest, unit: int) -> bool: return True -async def get_replica_set_primary(ops_test: OpsTest, excluded: List[str] = []) -> Optional[Unit]: +async def get_replica_set_primary( + ops_test: OpsTest, excluded: List[str] = [], application_name=APP_NAME +) -> Optional[Unit]: """Returns the primary unit name based no the replica set host.""" with await get_mongo_client(ops_test, excluded) as client: data = client.admin.command("replSetGetStatus") - unit_name = host_to_unit(primary_host(data)) + if unit_name: - mongodb_name = await get_application_name(ops_test, APP_NAME) + mongodb_name = await get_application_name(ops_test, application_name) for unit in ops_test.model.applications[mongodb_name].units: + logger.info( + f"Unit name: {unit.name}. 
Target unit name: {unit_name}, {unit.name == unit_name}" + ) if unit.name == unit_name: return unit + logger.error( + f"Target unit name {unit_name} not found in {ops_test.model.applications[mongodb_name].units}" + ) async def count_primaries(ops_test: OpsTest) -> int: diff --git a/tests/integration/helpers.py b/tests/integration/helpers.py index 619d0b25e..d5acc9728 100644 --- a/tests/integration/helpers.py +++ b/tests/integration/helpers.py @@ -96,13 +96,19 @@ async def run_mongo_op( mongo_uri: str = None, suffix: str = "", expecting_output: bool = True, + stringify: bool = True, + ignore_errors: bool = False, ) -> SimpleNamespace(): """Runs provided MongoDB operation in a separate container.""" if mongo_uri is None: mongo_uri = await mongodb_uri(ops_test) - mongo_cmd = f"mongo --quiet --eval 'JSON.stringify({mongo_op})' {mongo_uri}{suffix}" + if stringify: + mongo_cmd = f"mongo --quiet --eval 'JSON.stringify({mongo_op})' {mongo_uri}{suffix}" + else: + mongo_cmd = f"mongo --quiet --eval '{mongo_op}' {mongo_uri}{suffix}" + logger.info("Running mongo command: %r", mongo_cmd) kubectl_cmd = ( "microk8s", "kubectl", @@ -127,6 +133,11 @@ async def run_mongo_op( if ret_code != 0: logger.error("code %r; stdout %r; stderr: %r", ret_code, stdout, stderr) output.failed = True + output.data = { + "code": ret_code, + "stdout": stdout, + "stderr": stderr, + } return output output.succeeded = True @@ -136,12 +147,14 @@ async def run_mongo_op( except Exception: logger.error( "Could not serialize the output into json.{}{}".format( - f"\n\tOut: {stdout}" if stdout else "", - f"\n\tErr: {stderr}" if stderr else "", + f"\n\tSTDOUT:\n\t {stdout}" if stdout else "", + f"\n\tSTDERR:\n\t {stderr}" if stderr else "", ) ) - raise - + if not ignore_errors: + raise + else: + output.data = stdout return output diff --git a/tests/integration/relation_tests/__init__.py b/tests/integration/relation_tests/__init__.py new file mode 100644 index 000000000..db3bfe1a6 --- /dev/null +++ b/tests/integration/relation_tests/__init__.py @@ -0,0 +1,2 @@ +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. diff --git a/tests/integration/relation_tests/application-charm/charmcraft.yaml b/tests/integration/relation_tests/application-charm/charmcraft.yaml new file mode 100644 index 000000000..7e33b57ee --- /dev/null +++ b/tests/integration/relation_tests/application-charm/charmcraft.yaml @@ -0,0 +1,11 @@ +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. + +type: charm +bases: + - build-on: + - name: "ubuntu" + channel: "22.04" + run-on: + - name: "ubuntu" + channel: "22.04" diff --git a/tests/integration/relation_tests/application-charm/lib/charms/data_platform_libs/v0/database_requires.py b/tests/integration/relation_tests/application-charm/lib/charms/data_platform_libs/v0/database_requires.py new file mode 100644 index 000000000..3d1013808 --- /dev/null +++ b/tests/integration/relation_tests/application-charm/lib/charms/data_platform_libs/v0/database_requires.py @@ -0,0 +1,486 @@ +# Copyright 2022 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Relation requirer side abstraction for database relation. + +This library is a uniform interface to a selection of common database +metadata, with added custom events that add convenience to database management, +and methods to consume the application related data. + +Following an example of using the DatabaseCreatedEvent, in the context of the +application charm code: + +```python + +from charms.data_platform_libs.v0.database_requires import DatabaseRequires + +class ApplicationCharm(CharmBase): + # Application charm that connects to database charms. + + def __init__(self, *args): + super().__init__(*args) + + # Charm events defined in the database requires charm library. + self.database = DatabaseRequires(self, relation_name="database", database_name="database") + self.framework.observe(self.database.on.database_created, self._on_database_created) + + def _on_database_created(self, event: DatabaseCreatedEvent) -> None: + # Handle the created database + + # Create configuration file for app + config_file = self._render_app_config_file( + event.username, + event.password, + event.endpoints, + ) + + # Start application with rendered configuration + self._start_application(config_file) + + # Set active status + self.unit.status = ActiveStatus("received database credentials") +``` + +As shown above, the library provides some custom events to handle specific situations, +which are listed below: + +- database_created: event emitted when the requested database was created +- endpoints_changed: event emitted when the read/write endpoints of the database have changed +- read_only_endpoints_changed: event emitted when the read-only endpoints of the database + have changed + +If it's needed to connect multiple database clusters to the same relation endpoint +the application charm can implement the same code as if it would connect to only +one database cluster (like the above code example). + +To differentiate multiple clusters connected to the same relation endpoint +the application charm can use the name of the remote application: + +```python + +def _on_database_created(self, event: DatabaseCreatedEvent) -> None: + # Get the remote app name of the cluster that triggered this event + cluster = event.relation.app.name +``` + +It's also possible to provide an alias for each different database cluster/relation. + +So, it's possible to differentiate the clusters in two ways. +The first is to use the remote application name, ie `event.relation.app.name`, as mentioned above. + +The second way is to use different event handlers to handle each cluster events. +The implementation would be something like the following code: + +```python + +from charms.data_platform_libs.v0.database_requires import DatabaseRequires + +class ApplicationCharm(CharmBase): + # Application charm that connects to database charms. + + def __init__(self, *args): + super().__init__(*args) + + # Define the cluster aliases and one handler for each cluster database created event. 
+ self.database = DatabaseRequires( + self, + relation_name="database", + database_name="database", + relations_aliases = ["cluster1", "cluster2"], + ) + self.framework.observe( + self.database.on.cluster1_database_created, self._on_cluster1_database_created + ) + self.framework.observe( + self.database.on.cluster2_database_created, self._on_cluster2_database_created + ) + + def _on_cluster1_database_created(self, event: DatabaseCreatedEvent) -> None: + # Handle the created database on the cluster named cluster1 + + # Create configuration file for app + config_file = self._render_app_config_file( + event.username, + event.password, + event.endpoints, + ) + ... + + def _on_cluster2_database_created(self, event: DatabaseCreatedEvent) -> None: + # Handle the created database on the cluster named cluster2 + + # Create configuration file for app + config_file = self._render_app_config_file( + event.username, + event.password, + event.endpoints, + ) + ... + +``` +""" + +import json +import logging +from collections import namedtuple +from datetime import datetime +from typing import List, Optional + +from ops.charm import ( + CharmEvents, + RelationChangedEvent, + RelationEvent, + RelationJoinedEvent, +) +from ops.framework import EventSource, Object +from ops.model import Relation + +# The unique Charmhub library identifier, never change it +LIBID = "0241e088ffa9440fb4e3126349b2fb62" + +# Increment this major API version when introducing breaking changes +LIBAPI = 0 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 1 + +logger = logging.getLogger(__name__) + + +class DatabaseEvent(RelationEvent): + """Base class for database events.""" + + @property + def endpoints(self) -> Optional[str]: + """Returns a comma separated list of read/write endpoints.""" + return self.relation.data[self.relation.app].get("endpoints") + + @property + def password(self) -> Optional[str]: + """Returns the password for the created user.""" + return self.relation.data[self.relation.app].get("password") + + @property + def read_only_endpoints(self) -> Optional[str]: + """Returns a comma separated list of read only endpoints.""" + return self.relation.data[self.relation.app].get("read-only-endpoints") + + @property + def replset(self) -> Optional[str]: + """Returns the replicaset name. + + MongoDB only. + """ + return self.relation.data[self.relation.app].get("replset") + + @property + def tls(self) -> Optional[str]: + """Returns whether TLS is configured.""" + return self.relation.data[self.relation.app].get("tls") + + @property + def tls_ca(self) -> Optional[str]: + """Returns TLS CA.""" + return self.relation.data[self.relation.app].get("tls-ca") + + @property + def uris(self) -> Optional[str]: + """Returns the connection URIs. + + MongoDB, Redis, OpenSearch and Kafka only. + """ + return self.relation.data[self.relation.app].get("uris") + + @property + def username(self) -> Optional[str]: + """Returns the created username.""" + return self.relation.data[self.relation.app].get("username") + + @property + def version(self) -> Optional[str]: + """Returns the version of the database. + + Version as informed by the database daemon. 
+ """ + return self.relation.data[self.relation.app].get("version") + + +class DatabaseCreatedEvent(DatabaseEvent): + """Event emitted when a new database is created for use on this relation.""" + + +class DatabaseEndpointsChangedEvent(DatabaseEvent): + """Event emitted when the read/write endpoints are changed.""" + + +class DatabaseReadOnlyEndpointsChangedEvent(DatabaseEvent): + """Event emitted when the read only endpoints are changed.""" + + +class DatabaseEvents(CharmEvents): + """Database events. + + This class defines the events that the database can emit. + """ + + database_created = EventSource(DatabaseCreatedEvent) + endpoints_changed = EventSource(DatabaseEndpointsChangedEvent) + read_only_endpoints_changed = EventSource(DatabaseReadOnlyEndpointsChangedEvent) + + +Diff = namedtuple("Diff", "added changed deleted") +Diff.__doc__ = """ +A tuple for storing the diff between two data mappings. + +added - keys that were added +changed - keys that still exist but have new values +deleted - key that were deleted""" + + +class DatabaseRequires(Object): + """Requires-side of the database relation.""" + + on = DatabaseEvents() + + def __init__( + self, + charm, + relation_name: str, + database_name: str, + extra_user_roles: str = None, + relations_aliases: List[str] = None, + ): + """Manager of database client relations.""" + super().__init__(charm, relation_name) + self.charm = charm + self.database = database_name + self.extra_user_roles = extra_user_roles + self.local_app = self.charm.model.app + self.local_unit = self.charm.unit + self.relation_name = relation_name + self.relations_aliases = relations_aliases + self.framework.observe( + self.charm.on[relation_name].relation_joined, self._on_relation_joined_event + ) + self.framework.observe( + self.charm.on[relation_name].relation_changed, self._on_relation_changed_event + ) + + # Define custom event names for each alias. + if relations_aliases: + # Ensure the number of aliases match the maximum number + # of connections allowed in the specific relation. + relation_connection_limit = self.charm.meta.requires[relation_name].limit + if len(relations_aliases) != relation_connection_limit: + raise ValueError( + f"The number of aliases must match the maximum number of connections allowed in the relation. " + f"Expected {relation_connection_limit}, got {len(relations_aliases)}" + ) + + for relation_alias in relations_aliases: + self.on.define_event(f"{relation_alias}_database_created", DatabaseCreatedEvent) + self.on.define_event( + f"{relation_alias}_endpoints_changed", DatabaseEndpointsChangedEvent + ) + self.on.define_event( + f"{relation_alias}_read_only_endpoints_changed", + DatabaseReadOnlyEndpointsChangedEvent, + ) + + def _assign_relation_alias(self, relation_id: int) -> None: + """Assigns an alias to a relation. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + """ + # If this unit isn't the leader or no aliases were provided, return immediately. + if not self.local_unit.is_leader() or not self.relations_aliases: + return + + # Return if an alias was already assigned to this relation + # (like when there are more than one unit joining the relation). + if ( + self.charm.model.get_relation(self.relation_name, relation_id) + .data[self.local_app] + .get("alias") + ): + return + + # Retrieve the available aliases (the ones that weren't assigned to any relation). 
+ available_aliases = self.relations_aliases[:] + for relation in self.charm.model.relations[self.relation_name]: + alias = relation.data[self.local_app].get("alias") + if alias: + logger.debug(f"Alias {alias} was already assigned to relation {relation.id}") + available_aliases.remove(alias) + + # Set the alias in the application relation databag of the specific relation. + self._update_relation_data(relation_id, {"alias": available_aliases[0]}) + + def _diff(self, event: RelationChangedEvent) -> Diff: + """Retrieves the diff of the data in the relation changed databag. + + Args: + event: relation changed event. + + Returns: + a Diff instance containing the added, deleted and changed + keys from the event relation databag. + """ + # Retrieve the old data from the data key in the application relation databag. + old_data = json.loads(event.relation.data[self.charm.model.app].get("data", "{}")) + # Retrieve the new data from the event relation databag. + new_data = { + key: value for key, value in event.relation.data[event.app].items() if key != "data" + } + + # These are the keys that were added to the databag and triggered this event. + added = new_data.keys() - old_data.keys() + # These are the keys that were removed from the databag and triggered this event. + deleted = old_data.keys() - new_data.keys() + # These are the keys that already existed in the databag, + # but had their values changed. + changed = { + key for key in old_data.keys() & new_data.keys() if old_data[key] != new_data[key] + } + + # TODO: evaluate the possibility of losing the diff if some error + # happens in the charm before the diff is completely checked (DPE-412). + # Convert the new_data to a serializable format and save it for a next diff check. + event.relation.data[self.local_app].update({"data": json.dumps(new_data)}) + + # Return the diff with all possible changes. + return Diff(added, changed, deleted) + + def _emit_aliased_event(self, relation: Relation, event_name: str) -> None: + """Emit an aliased event to a particular relation if it has an alias. + + Args: + relation: a particular relation. + event_name: the name of the event to emit. + """ + alias = self._get_relation_alias(relation.id) + if alias: + getattr(self.on, f"{alias}_{event_name}").emit(relation) + + def _get_relation_alias(self, relation_id: int) -> Optional[str]: + """Returns the relation alias. + + Args: + relation_id: the identifier for a particular relation. + + Returns: + the relation alias or None if the relation wasn't found. + """ + for relation in self.charm.model.relations[self.relation_name]: + if relation.id == relation_id: + return relation.data[self.local_app].get("alias") + return None + + def fetch_relation_data(self) -> dict: + """Retrieves data from relation. + + This function can be used to retrieve data from a relation + in the charm code when outside an event callback. + + Returns: + a dict of the values stored in the relation data bag + for all relation instances (indexed by the relation id). + """ + data = {} + for relation in self.relations: + data[relation.id] = { + key: value for key, value in relation.data[relation.app].items() if key != "data" + } + return data + + def _update_relation_data(self, relation_id: int, data: dict) -> None: + """Updates a set of key-value pairs in the relation. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. 
+ data: dict containing the key-value pairs + that should be updated in the relation. + """ + if self.local_unit.is_leader(): + relation = self.charm.model.get_relation(self.relation_name, relation_id) + relation.data[self.local_app].update(data) + + def _on_relation_joined_event(self, event: RelationJoinedEvent) -> None: + """Event emitted when the application joins the database relation.""" + # If relations aliases were provided, assign one to the relation. + self._assign_relation_alias(event.relation.id) + + # Sets both database and extra user roles in the relation + # if the roles are provided. Otherwise, sets only the database. + if self.extra_user_roles: + self._update_relation_data( + event.relation.id, + { + "database": self.database, + "extra-user-roles": self.extra_user_roles, + }, + ) + else: + self._update_relation_data(event.relation.id, {"database": self.database}) + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the database relation has changed.""" + # Only the leader should handle this event. + if not self.charm.unit.is_leader(): + return + + # Check which data has changed to emit customs events. + diff = self._diff(event) + + # Check if the database is created + # (the database charm shared the credentials). + if "username" in diff.added and "password" in diff.added: + # Emit the default event (the one without an alias). + self.on.database_created.emit(event.relation) + + # Emit the aliased event (if any). + self._emit_aliased_event(event.relation, "database_created") + + # Emit an endpoints changed event if the database + # added or changed this info in the relation databag. + if "endpoints" in diff.added or "endpoints" in diff.changed: + # Emit the default event (the one without an alias). + logger.info(f"endpoints changed on {datetime.now()}") + self.on.endpoints_changed.emit(event.relation) + + # Emit the aliased event (if any). + self._emit_aliased_event(event.relation, "endpoints_changed") + + # Emit a read only endpoints changed event if the database + # added or changed this info in the relation databag. + if "read-only-endpoints" in diff.added or "read-only-endpoints" in diff.changed: + # Emit the default event (the one without an alias). + logger.info(f"read-only-endpoints changed on {datetime.now()}") + self.on.read_only_endpoints_changed.emit(event.relation) + + # Emit the aliased event (if any). + self._emit_aliased_event(event.relation, "read_only_endpoints_changed") + + @property + def relations(self) -> List[Relation]: + """The list of Relation instances associated with this relation_name.""" + return list(self.charm.model.relations[self.relation_name]) diff --git a/tests/integration/relation_tests/application-charm/metadata.yaml b/tests/integration/relation_tests/application-charm/metadata.yaml new file mode 100644 index 000000000..2ac8781ad --- /dev/null +++ b/tests/integration/relation_tests/application-charm/metadata.yaml @@ -0,0 +1,21 @@ +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. +name: application +description: | + Data platform libs application charm used in integration tests. +summary: | + Data platform libs application meant to be used + only for testing of the libs in this repository. 
+series: + - focal + +requires: + first-database: + interface: mongodb_client + second-database: + interface: mongodb_client + multiple-database-clusters: + interface: mongodb_client + aliased-multiple-database-clusters: + interface: mongodb_client + limit: 2 diff --git a/tests/integration/relation_tests/application-charm/requirements.txt b/tests/integration/relation_tests/application-charm/requirements.txt new file mode 100644 index 000000000..96faf889a --- /dev/null +++ b/tests/integration/relation_tests/application-charm/requirements.txt @@ -0,0 +1 @@ +ops >= 1.4.0 diff --git a/tests/integration/relation_tests/application-charm/src/charm.py b/tests/integration/relation_tests/application-charm/src/charm.py new file mode 100755 index 000000000..893607411 --- /dev/null +++ b/tests/integration/relation_tests/application-charm/src/charm.py @@ -0,0 +1,172 @@ +#!/usr/bin/env python3 +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. + +"""Application charm that connects to database charms. + +This charm is meant to be used only for testing +of the libraries in this repository. +""" + +import logging + +from charms.data_platform_libs.v0.database_requires import ( + DatabaseCreatedEvent, + DatabaseEndpointsChangedEvent, + DatabaseRequires, +) +from ops.charm import CharmBase +from ops.main import main +from ops.model import ActiveStatus + +logger = logging.getLogger(__name__) + +# Extra roles that this application needs when interacting with the database. +EXTRA_USER_ROLES = "admin" + + +class ApplicationCharm(CharmBase): + """Application charm that connects to database charms.""" + + def __init__(self, *args): + super().__init__(*args) + + # Default charm events. + self.framework.observe(self.on.start, self._on_start) + + # Events related to the first database that is requested + # (these events are defined in the database requires charm library). + database_name = f'{self.app.name.replace("-", "_")}_first_database' + self.first_database = DatabaseRequires( + self, "first-database", database_name, EXTRA_USER_ROLES + ) + self.framework.observe( + self.first_database.on.database_created, self._on_first_database_created + ) + self.framework.observe( + self.first_database.on.endpoints_changed, self._on_first_database_endpoints_changed + ) + + # Events related to the second database that is requested + # (these events are defined in the database requires charm library). + database_name = f'{self.app.name.replace("-", "_")}_second_database' + self.second_database = DatabaseRequires( + self, "second-database", database_name, EXTRA_USER_ROLES + ) + self.framework.observe( + self.second_database.on.database_created, self._on_second_database_created + ) + self.framework.observe( + self.second_database.on.endpoints_changed, self._on_second_database_endpoints_changed + ) + + # Multiple database clusters charm events (clusters/relations without alias). + database_name = f'{self.app.name.replace("-", "_")}_multiple_database_clusters' + self.database_clusters = DatabaseRequires( + self, "multiple-database-clusters", database_name, EXTRA_USER_ROLES + ) + self.framework.observe( + self.database_clusters.on.database_created, self._on_cluster_database_created + ) + self.framework.observe( + self.database_clusters.on.endpoints_changed, + self._on_cluster_endpoints_changed, + ) + + # Multiple database clusters charm events (defined dynamically + # in the database requires charm library, using the provided cluster/relation aliases). 
+ database_name = f'{self.app.name.replace("-", "_")}_aliased_multiple_database_clusters' + cluster_aliases = ["cluster1", "cluster2"] # Aliases for the multiple clusters/relations. + self.aliased_database_clusters = DatabaseRequires( + self, + "aliased-multiple-database-clusters", + database_name, + EXTRA_USER_ROLES, + cluster_aliases, + ) + # Each database cluster will have its own events + # with the name having the cluster/relation alias as the prefix. + self.framework.observe( + self.aliased_database_clusters.on.cluster1_database_created, + self._on_cluster1_database_created, + ) + self.framework.observe( + self.aliased_database_clusters.on.cluster1_endpoints_changed, + self._on_cluster1_endpoints_changed, + ) + self.framework.observe( + self.aliased_database_clusters.on.cluster2_database_created, + self._on_cluster2_database_created, + ) + self.framework.observe( + self.aliased_database_clusters.on.cluster2_endpoints_changed, + self._on_cluster2_endpoints_changed, + ) + + def _on_start(self, _) -> None: + """Only sets an Active status.""" + self.unit.status = ActiveStatus() + + # First database events observers. + def _on_first_database_created(self, event: DatabaseCreatedEvent) -> None: + """Event triggered when a database was created for this application.""" + # Retrieve the credentials using the charm library. + logger.info(f"first database credentials: {event.username} {event.password}") + self.unit.status = ActiveStatus("received database credentials of the first database") + + def _on_first_database_endpoints_changed(self, event: DatabaseEndpointsChangedEvent) -> None: + """Event triggered when the read/write endpoints of the database change.""" + logger.info(f"first database endpoints have been changed to: {event.endpoints}") + + # Second database events observers. + def _on_second_database_created(self, event: DatabaseCreatedEvent) -> None: + """Event triggered when a database was created for this application.""" + # Retrieve the credentials using the charm library. + logger.info(f"second database credentials: {event.username} {event.password}") + self.unit.status = ActiveStatus("received database credentials of the second database") + + def _on_second_database_endpoints_changed(self, event: DatabaseEndpointsChangedEvent) -> None: + """Event triggered when the read/write endpoints of the database change.""" + logger.info(f"second database endpoints have been changed to: {event.endpoints}") + + # Multiple database clusters events observers. + def _on_cluster_database_created(self, event: DatabaseCreatedEvent) -> None: + """Event triggered when a database was created for this application.""" + # Retrieve the credentials using the charm library. + logger.info( + f"cluster {event.relation.app.name} credentials: {event.username} {event.password}" + ) + self.unit.status = ActiveStatus( + f"received database credentials for cluster {event.relation.app.name}" + ) + + def _on_cluster_endpoints_changed(self, event: DatabaseEndpointsChangedEvent) -> None: + """Event triggered when the read/write endpoints of the database change.""" + logger.info( + f"cluster {event.relation.app.name} endpoints have been changed to: {event.endpoints}" + ) + + # Multiple database clusters events observers (for aliased clusters/relations). + def _on_cluster1_database_created(self, event: DatabaseCreatedEvent) -> None: + """Event triggered when a database was created for this application.""" + # Retrieve the credentials using the charm library. 
+ logger.info(f"cluster1 credentials: {event.username} {event.password}") + self.unit.status = ActiveStatus("received database credentials for cluster1") + + def _on_cluster1_endpoints_changed(self, event: DatabaseEndpointsChangedEvent) -> None: + """Event triggered when the read/write endpoints of the database change.""" + logger.info(f"cluster1 endpoints have been changed to: {event.endpoints}") + + def _on_cluster2_database_created(self, event: DatabaseCreatedEvent) -> None: + """Event triggered when a database was created for this application.""" + # Retrieve the credentials using the charm library. + logger.info(f"cluster2 credentials: {event.username} {event.password}") + self.unit.status = ActiveStatus("received database credentials for cluster2") + + def _on_cluster2_endpoints_changed(self, event: DatabaseEndpointsChangedEvent) -> None: + """Event triggered when the read/write endpoints of the database change.""" + logger.info(f"cluster2 endpoints have been changed to: {event.endpoints}") + + +if __name__ == "__main__": + main(ApplicationCharm) diff --git a/tests/integration/relation_tests/helpers.py b/tests/integration/relation_tests/helpers.py new file mode 100644 index 000000000..0a27f863c --- /dev/null +++ b/tests/integration/relation_tests/helpers.py @@ -0,0 +1,99 @@ +#!/usr/bin/env python3 +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. +import json +from typing import Optional + +import yaml +from pytest_operator.plugin import OpsTest +from tenacity import RetryError, Retrying, stop_after_delay, wait_fixed + + +async def get_application_relation_data( + ops_test: OpsTest, + application_name: str, + relation_name: str, + key: str, + relation_id: str = None, + relation_alias: str = None, +) -> Optional[str]: + """Get relation data for an application. + + Args: + ops_test: The ops test framework instance + application_name: The name of the application + relation_name: name of the relation to get connection data from + key: key of data to be retrieved + relation_id: id of the relation to get connection data from + relation_alias: alias of the relation (like a connection name) + to get connection data from + Returns: + the that that was requested or None + if no data in the relation + Raises: + ValueError if it's not possible to get application unit data + or if there is no data for the particular relation endpoint + and/or alias. + """ + unit_name = f"{application_name}/0" + raw_data = (await ops_test.juju("show-unit", unit_name))[1] + + if not raw_data: + raise ValueError(f"no unit info could be grabbed for {unit_name}") + data = yaml.safe_load(raw_data) + + # Filter the data based on the relation name. + relation_data = [v for v in data[unit_name]["relation-info"] if v["endpoint"] == relation_name] + + if relation_id: + # Filter the data based on the relation id. + relation_data = [v for v in relation_data if v["relation-id"] == relation_id] + + if relation_alias: + # Filter the data based on the cluster/relation alias. 
+ relation_data = [ + v + for v in relation_data + if json.loads(v["application-data"]["data"])["alias"] == relation_alias + ] + + if len(relation_data) == 0: + raise ValueError( + f"no relation data could be grabbed on relation with endpoint {relation_name} and alias {relation_alias}" + ) + + return relation_data[0]["application-data"].get(key) + + +async def verify_application_data( + ops_test: OpsTest, + application_name: str, + database_app: str, + relation_name: str, +) -> bool: + """Verifies the application relation metadata matches with the MongoDB deployment. + + Specifically, it verifies that all units are present in the URI and that there are no + additional units + """ + try: + for attempt in Retrying(stop=stop_after_delay(60), wait=wait_fixed(3)): + with attempt: + endpoints_str = await get_application_relation_data( + ops_test, application_name, relation_name, "endpoints" + ) + for unit in ops_test.model.applications[database_app].units: + if unit.public_address not in endpoints_str: + raise Exception(f"unit {unit.name} not present in connection URI") + + if len(endpoints_str.split(",")) != len( + ops_test.model.applications[database_app].units + ): + raise Exception( + "number of endpoints in replicaset URI do not match number of units" + ) + + except RetryError: + return False + + return True diff --git a/tests/integration/relation_tests/test_charm_relations.py b/tests/integration/relation_tests/test_charm_relations.py new file mode 100644 index 000000000..d27d33025 --- /dev/null +++ b/tests/integration/relation_tests/test_charm_relations.py @@ -0,0 +1,439 @@ +#!/usr/bin/env python3 +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. +import asyncio +import logging +import time +from pathlib import Path + +import pytest +import yaml +from pymongo.uri_parser import parse_uri +from pytest_operator.plugin import OpsTest +from tenacity import RetryError + +from ..ha_tests.helpers import get_replica_set_primary as replica_set_primary +from ..helpers import run_mongo_op +from .helpers import get_application_relation_data, verify_application_data + +logger = logging.getLogger(__name__) + +MEDIAN_REELECTION_TIME = 12 +APPLICATION_APP_NAME = "application" +DATABASE_METADATA = yaml.safe_load(Path("./metadata.yaml").read_text()) +PORT = 27017 +DATABASE_APP_NAME = "mongodb-k8s" +FIRST_DATABASE_RELATION_NAME = "first-database" +SECOND_DATABASE_RELATION_NAME = "second-database" +MULTIPLE_DATABASE_CLUSTERS_RELATION_NAME = "multiple-database-clusters" +ALIASED_MULTIPLE_DATABASE_CLUSTERS_RELATION_NAME = "aliased-multiple-database-clusters" +ANOTHER_DATABASE_APP_NAME = "another-database" +APP_NAMES = [APPLICATION_APP_NAME, DATABASE_APP_NAME, ANOTHER_DATABASE_APP_NAME] +TEST_APP_CHARM_PATH = "tests/integration/relation_tests/application-charm" + + +@pytest.mark.abort_on_fail +async def test_deploy_charms(ops_test: OpsTest): + """Deploy both charms (application and database) to use in the tests.""" + # Deploy both charms (2 units for each application to test that later they correctly + # set data in the relation application databag using only the leader unit). 
+ + application_charm = await ops_test.build_charm(TEST_APP_CHARM_PATH) + database_charm = await ops_test.build_charm(".") + db_resources = { + "mongodb-image": DATABASE_METADATA["resources"]["mongodb-image"]["upstream-source"] + } + await asyncio.gather( + ops_test.model.deploy( + application_charm, + application_name=APPLICATION_APP_NAME, + num_units=2, + ), + ops_test.model.deploy( + database_charm, + application_name=DATABASE_APP_NAME, + resources=db_resources, + num_units=1, + ), + ops_test.model.deploy( + database_charm, application_name=ANOTHER_DATABASE_APP_NAME, resources=db_resources + ), + ) + await ops_test.model.wait_for_idle( + apps=APP_NAMES, status="active", wait_for_units=1, timeout=1000 + ) + + +async def verify_crud_operations(ops_test: OpsTest, connection_string: str): + # insert some data + cmd = ( + 'var ubuntu = {"release_name": "Focal Fossa", "version": "20.04", "LTS": "true"}; ' + "JSON.stringify(db.test_collection.insertOne(ubuntu));" + ) + result = await run_mongo_op(ops_test, cmd, f'"{connection_string}"', stringify=False) + assert result.data["acknowledged"] is True + + # query the data + cmd = 'db.test_collection.find({}, {"release_name": 1}).toArray()' + result = await run_mongo_op( + ops_test, f"JSON.stringify({cmd})", f'"{connection_string}"', stringify=False + ) + assert result.data[0]["release_name"] == "Focal Fossa" + + # update the data + ubuntu_version = '{"version": "20.04"}' + ubuntu_name_updated = '{"$set": {"release_name": "Fancy Fossa"}}' + cmd = f"db.test_collection.updateOne({ubuntu_version}, {ubuntu_name_updated})" + result = await run_mongo_op(ops_test, cmd, f'"{connection_string}"', stringify=False) + assert result.data["acknowledged"] is True + + # query the data + cmd = 'db.test_collection.find({}, {"release_name": 1}).toArray()' + result = await run_mongo_op( + ops_test, f"JSON.stringify({cmd})", f'"{connection_string}"', stringify=False + ) + assert len(result.data) == 1 + assert result.data[0]["release_name"] == "Fancy Fossa" + + # delete the data + cmd = 'db.test_collection.deleteOne({"release_name": "Fancy Fossa"})' + result = await run_mongo_op(ops_test, cmd, f'"{connection_string}"', stringify=False) + assert result.data["acknowledged"] is True + + # query the data + cmd = 'db.test_collection.find({}, {"release_name": 1}).toArray()' + result = await run_mongo_op( + ops_test, f"JSON.stringify({cmd})", f'"{connection_string}"', stringify=False + ) + assert len(result.data) == 0 + + +@pytest.mark.abort_on_fail +async def test_database_relation_with_charm_libraries(ops_test: OpsTest): + """Test basic functionality of database relation interface.""" + # Relate the charms and wait for them exchanging some connection data. 
+ await ops_test.model.add_relation( + f"{APPLICATION_APP_NAME}:{FIRST_DATABASE_RELATION_NAME}", DATABASE_APP_NAME + ) + await ops_test.model.wait_for_idle(apps=APP_NAMES, status="active") + connection_string = await get_application_relation_data( + ops_test, APPLICATION_APP_NAME, FIRST_DATABASE_RELATION_NAME, "uris" + ) + + await verify_crud_operations(ops_test, connection_string) + + +async def verify_primary(ops_test: OpsTest, application_name: str): + # verify primary is present in hosts provided to application + # sleep for twice the median election time + time.sleep(MEDIAN_REELECTION_TIME * 2) + await get_application_relation_data( + ops_test, APPLICATION_APP_NAME, FIRST_DATABASE_RELATION_NAME, "endpoints" + ) + + try: + primary = await replica_set_primary(ops_test, application_name=application_name) + except RetryError: + assert False, "replica set has no primary" + + assert primary is not None, "Replica set has no primary" + + +@pytest.mark.abort_on_fail +async def test_app_relation_metadata_change(ops_test: OpsTest) -> None: + """Verifies that the app metadata changes with db relation joined and departed events.""" + # verify application metadata is correct before adding/removing units. + try: + await verify_application_data( + ops_test, APPLICATION_APP_NAME, DATABASE_APP_NAME, FIRST_DATABASE_RELATION_NAME + ) + except RetryError: + assert False, "Hosts are not correct in application data." + + connection_string = await get_application_relation_data( + ops_test, APPLICATION_APP_NAME, FIRST_DATABASE_RELATION_NAME, "uris" + ) + + connection_data = parse_uri(connection_string) + assert len(connection_data["nodelist"]) == 1 + assert connection_data["nodelist"][0][0] == "mongodb-k8s-0.mongodb-k8s-endpoints" + + # verify application metadata is correct after adding units. + await ops_test.model.applications[DATABASE_APP_NAME].add_units(count=2) + await ops_test.model.wait_for_idle( + apps=APP_NAMES, + status="active", + timeout=1000, + ) + + try: + await verify_application_data( + ops_test, APPLICATION_APP_NAME, DATABASE_APP_NAME, FIRST_DATABASE_RELATION_NAME + ) + except RetryError: + assert False, "Hosts not updated in application data after adding units." + + await verify_primary(ops_test, DATABASE_APP_NAME) + + scaled_up_string = await get_application_relation_data( + ops_test, APPLICATION_APP_NAME, FIRST_DATABASE_RELATION_NAME, "uris" + ) + + scaled_up_data = parse_uri(scaled_up_string) + assert len(scaled_up_data["nodelist"]) == 3 + scaled_up_data["nodelist"].sort() + assert all( + [ + a[0] == b + for a, b in zip( + scaled_up_data["nodelist"], + [ + "mongodb-k8s-0.mongodb-k8s-endpoints", + "mongodb-k8s-1.mongodb-k8s-endpoints", + "mongodb-k8s-2.mongodb-k8s-endpoints", + ], + ) + ] + ) + + # test crud operations + await verify_crud_operations(ops_test, scaled_up_string) + + # verify application metadata is correct after removing the pre-existing units. This is + # this is important since we want to test that the application related will work with + # only the newly added units from above. 
+ await ops_test.model.applications[DATABASE_APP_NAME].scale(scale_change=-1) + await ops_test.model.wait_for_idle( + apps=APP_NAMES, + status="active", + timeout=1000, + ) + + await verify_primary(ops_test, DATABASE_APP_NAME) + + scaled_down_string = await get_application_relation_data( + ops_test, APPLICATION_APP_NAME, FIRST_DATABASE_RELATION_NAME, "uris" + ) + + scaled_down_data = parse_uri(scaled_down_string) + assert len(scaled_down_data["nodelist"]) == 2 + scaled_down_data["nodelist"].sort() + assert all( + [ + a[0] == b + for a, b in zip( + scaled_down_data["nodelist"], + ["mongodb-k8s-0.mongodb-k8s-endpoints", "mongodb-k8s-1.mongodb-k8s-endpoints"], + ) + ] + ) + # test crud operations + await verify_crud_operations(ops_test, scaled_down_string) + + await ops_test.model.applications[DATABASE_APP_NAME].scale(scale_change=-1) + await ops_test.model.wait_for_idle( + apps=APP_NAMES, + status="active", + timeout=1000, + ) + + try: + await verify_application_data( + ops_test, APPLICATION_APP_NAME, DATABASE_APP_NAME, FIRST_DATABASE_RELATION_NAME + ) + except RetryError: + assert False, "Hosts not updated in application data after removing units." + + await verify_primary(ops_test, DATABASE_APP_NAME) + + scaled_down_string = await get_application_relation_data( + ops_test, APPLICATION_APP_NAME, FIRST_DATABASE_RELATION_NAME, "uris" + ) + scaled_down_data = parse_uri(scaled_down_string) + assert len(scaled_down_data["nodelist"]) == 1 + assert scaled_down_data["nodelist"][0][0] == "mongodb-k8s-0.mongodb-k8s-endpoints" + + # test crud operations + await verify_crud_operations(ops_test, scaled_down_string) + + +async def test_user_with_extra_roles(ops_test: OpsTest): + """Test superuser actions (ie creating a new user and creating a new database).""" + connection_string = await get_application_relation_data( + ops_test, APPLICATION_APP_NAME, FIRST_DATABASE_RELATION_NAME, "uris" + ) + database = await get_application_relation_data( + ops_test, APPLICATION_APP_NAME, FIRST_DATABASE_RELATION_NAME, "database" + ) + + cmd = f'db.createUser({{user: "newTestUser", pwd: "Test123", roles: [{{role: "readWrite", db: "{database}"}}]}});' + result = await run_mongo_op( + ops_test, cmd, f'"{connection_string}"', stringify=False, ignore_errors=True + ) + assert 'user" : "newTestUser"' in result.data + cmd = 'db = db.getSiblingDB("new_database"); db.test_collection.insertOne({"test": "one"});' + result = await run_mongo_op( + ops_test, cmd, f'"{connection_string}"', stringify=False, ignore_errors=True + ) + assert '"acknowledged" : true' in result.data + + +async def test_two_applications_doesnt_share_the_same_relation_data(ops_test: OpsTest): + """Test that two different application connect to the database with different credentials.""" + # Set some variables to use in this test. + application_charm = await ops_test.build_charm(TEST_APP_CHARM_PATH) + another_application_app_name = "another-application" + all_app_names = [another_application_app_name] + all_app_names.extend(APP_NAMES) + + # Deploy another application. + await ops_test.model.deploy( + application_charm, + application_name=another_application_app_name, + ) + await ops_test.model.wait_for_idle(apps=all_app_names, status="active") + + # Relate the new application with the database + # and wait for them exchanging some connection data. 
+ await ops_test.model.add_relation( + f"{another_application_app_name}:{FIRST_DATABASE_RELATION_NAME}", DATABASE_APP_NAME + ) + await ops_test.model.wait_for_idle(apps=all_app_names, status="active") + + # Assert the two application have different relation (connection) data. + application_connection_string = await get_application_relation_data( + ops_test, APPLICATION_APP_NAME, FIRST_DATABASE_RELATION_NAME, "uris" + ) + another_application_connection_string = await get_application_relation_data( + ops_test, another_application_app_name, FIRST_DATABASE_RELATION_NAME, "uris" + ) + assert application_connection_string != another_application_connection_string + + +async def test_an_application_can_connect_to_multiple_database_clusters(ops_test: OpsTest): + """Test that an application can connect to different clusters of the same database.""" + # Relate the application with both database clusters + # and wait for them exchanging some connection data. + first_cluster_relation = await ops_test.model.add_relation( + f"{APPLICATION_APP_NAME}:{MULTIPLE_DATABASE_CLUSTERS_RELATION_NAME}", DATABASE_APP_NAME + ) + second_cluster_relation = await ops_test.model.add_relation( + f"{APPLICATION_APP_NAME}:{MULTIPLE_DATABASE_CLUSTERS_RELATION_NAME}", + ANOTHER_DATABASE_APP_NAME, + ) + await ops_test.model.wait_for_idle(apps=APP_NAMES, status="active") + + # Retrieve the connection string to both database clusters using the relation aliases + # and assert they are different. + application_connection_string = await get_application_relation_data( + ops_test, + APPLICATION_APP_NAME, + MULTIPLE_DATABASE_CLUSTERS_RELATION_NAME, + "uris", + relation_id=first_cluster_relation.id, + ) + + another_application_connection_string = await get_application_relation_data( + ops_test, + APPLICATION_APP_NAME, + MULTIPLE_DATABASE_CLUSTERS_RELATION_NAME, + "uris", + relation_id=second_cluster_relation.id, + ) + + assert application_connection_string != another_application_connection_string + + +async def test_an_application_can_connect_to_multiple_aliased_database_clusters(ops_test: OpsTest): + """Test that an application can connect to different clusters of the same database.""" + # Relate the application with both database clusters + # and wait for them exchanging some connection data. + await asyncio.gather( + ops_test.model.add_relation( + f"{APPLICATION_APP_NAME}:{ALIASED_MULTIPLE_DATABASE_CLUSTERS_RELATION_NAME}", + DATABASE_APP_NAME, + ), + ops_test.model.add_relation( + f"{APPLICATION_APP_NAME}:{ALIASED_MULTIPLE_DATABASE_CLUSTERS_RELATION_NAME}", + ANOTHER_DATABASE_APP_NAME, + ), + ) + + await ops_test.model.wait_for_idle(apps=APP_NAMES, status="active") + + # Retrieve the connection string to both database clusters using the relation aliases + # and assert they are different. 
+ application_connection_string = await get_application_relation_data( + ops_test, + APPLICATION_APP_NAME, + ALIASED_MULTIPLE_DATABASE_CLUSTERS_RELATION_NAME, + "uris", + relation_alias="cluster1", + ) + + another_application_connection_string = await get_application_relation_data( + ops_test, + APPLICATION_APP_NAME, + ALIASED_MULTIPLE_DATABASE_CLUSTERS_RELATION_NAME, + "uris", + relation_alias="cluster2", + ) + + assert application_connection_string != another_application_connection_string + + +async def test_an_application_can_request_multiple_databases(ops_test: OpsTest): + """Test that an application can request additional databases using the same interface.""" + # Relate the charms using another relation and wait for them exchanging some connection data. + await ops_test.model.add_relation( + f"{APPLICATION_APP_NAME}:{SECOND_DATABASE_RELATION_NAME}", DATABASE_APP_NAME + ) + await ops_test.model.wait_for_idle(apps=APP_NAMES, status="active") + + # Get the connection strings to connect to both databases. + first_database_connection_string = await get_application_relation_data( + ops_test, APPLICATION_APP_NAME, FIRST_DATABASE_RELATION_NAME, "uris" + ) + second_database_connection_string = await get_application_relation_data( + ops_test, APPLICATION_APP_NAME, SECOND_DATABASE_RELATION_NAME, "uris" + ) + + # Assert the two application have different relation (connection) data. + assert first_database_connection_string != second_database_connection_string + + +async def test_removed_relation_no_longer_has_access(ops_test: OpsTest): + """Verify removed applications no longer have access to the database.""" + # before removing relation we need its authorisation via connection string + connection_string = await get_application_relation_data( + ops_test, APPLICATION_APP_NAME, FIRST_DATABASE_RELATION_NAME, "uris" + ) + + await ops_test.model.applications[DATABASE_APP_NAME].remove_relation( + f"{APPLICATION_APP_NAME}:{FIRST_DATABASE_RELATION_NAME}", f"{DATABASE_APP_NAME}:database" + ) + await ops_test.model.wait_for_idle(apps=APP_NAMES, status="active") + + removed_access = False + cmd = "db.runCommand({ replSetGetStatus : 1 });" + result = await run_mongo_op( + ops_test, cmd, f'"{connection_string}"', stringify=False, ignore_errors=True + ) + + removed_access = False + if ( + result.failed + and "code" in result.data + and result.data["code"] == 1 + and "AuthenticationFailed" in result.data["stdout"] + ): + removed_access = True + elif result.failed: + raise Exception( + "OperationFailure: code {}; stdout {}; stderr: {}".format( + result.data["code"], result.data["stdout"], result.data["stderr"] + ) + ) + assert ( + removed_access + ), "application: {APPLICATION_APP_NAME} still has access to mongodb after relation removal." 
diff --git a/tox.ini b/tox.ini index f8beab59f..6590fbd1b 100644 --- a/tox.ini +++ b/tox.ini @@ -17,6 +17,7 @@ allowlist_externals = poetry set_env = PYTHONPATH = {tox_root}/lib:{[vars]src_path} PYTHONBREAKPOINT=ipdb.set_trace + PYTHONDONTWRITEBYTECODE=1 PY_COLORS=1 pass_env = PYTHONPATH @@ -106,3 +107,15 @@ commands_pre = poetry run pip install juju==2.9.42.1 commands = poetry run pytest -v --tb native --log-cli-level=INFO -s --durations=0 {posargs} {[vars]tests_path}/integration/tls_tests/test_tls.py + +[testenv:relation-integration] +description = Run relation integration tests +pass_env = + {[testenv]pass_env} + CI + CI_PACKED_CHARMS +commands_pre = + poetry install --with integration + poetry run pip install juju==2.9.42.1 +commands = + poetry run pytest -v --tb native --log-cli-level=INFO -s --durations=0 {posargs} {[vars]tests_path}/integration/relation_tests/test_charm_relations.py
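
The charm-side change at the top of this patch (`_update_app_relation_data`) rewrites the `endpoints` and `uris` keys in the application databag of every `database` relation whenever replica set membership changes. On the requirer side that shows up as the `endpoints_changed` event of the `database_requires` library vendored in this patch. Below is a minimal sketch of a consumer, not part of the patch itself: the charm name, database name, and the `database` requires endpoint in metadata are illustrative assumptions; the library import path, event names, and event properties come from the library added above.

```python
#!/usr/bin/env python3
# Minimal sketch (not from this PR) of a requirer charm consuming the refreshed
# relation data. Assumes metadata.yaml declares
#   requires: {database: {interface: mongodb_client}}
# and that charms/data_platform_libs/v0/database_requires.py (added in this PR) is vendored.
import logging

from charms.data_platform_libs.v0.database_requires import (
    DatabaseCreatedEvent,
    DatabaseEndpointsChangedEvent,
    DatabaseRequires,
)
from ops.charm import CharmBase
from ops.main import main
from ops.model import ActiveStatus

logger = logging.getLogger(__name__)


class ClientCharm(CharmBase):
    """Hypothetical client charm; names here are illustrative only."""

    def __init__(self, *args):
        super().__init__(*args)
        # "database" is the requires endpoint name, "my_db" the requested database name.
        self.database = DatabaseRequires(self, relation_name="database", database_name="my_db")
        self.framework.observe(self.database.on.database_created, self._on_database_created)
        self.framework.observe(self.database.on.endpoints_changed, self._on_endpoints_changed)

    def _on_database_created(self, event: DatabaseCreatedEvent) -> None:
        # Credentials and the initial connection URI arrive once the provider creates the user.
        logger.info("credentials for %s received; uri: %s", event.username, event.uris)
        self.unit.status = ActiveStatus()

    def _on_endpoints_changed(self, event: DatabaseEndpointsChangedEvent) -> None:
        # Fired after the provider's _update_app_relation_data rewrites "endpoints"/"uris",
        # e.g. when replica set members are added or removed during scaling.
        logger.info("new endpoints: %s", event.endpoints)


if __name__ == "__main__":
    main(ClientCharm)
```

This mirrors the pattern the test application charm in this patch uses for its `first-database` and `second-database` endpoints, which is what the `test_app_relation_metadata_change` integration test exercises when it scales the database application up and down.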