diff --git a/lib/charms/data_platform_libs/v0/upgrade.py b/lib/charms/data_platform_libs/v0/upgrade.py index ef74644de..18a58ff09 100644 --- a/lib/charms/data_platform_libs/v0/upgrade.py +++ b/lib/charms/data_platform_libs/v0/upgrade.py @@ -285,7 +285,7 @@ def restart(self, event) -> None: # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 16 +LIBPATCH = 17 PYDEPS = ["pydantic>=1.10,<2", "poetry-core"] @@ -907,6 +907,17 @@ def _on_upgrade_charm(self, event: UpgradeCharmEvent) -> None: logger.error(e) self.set_unit_failed() return + top_unit_id = self.upgrade_stack[-1] + top_unit = self.charm.model.get_unit(f"{self.charm.app.name}/{top_unit_id}") + if ( + top_unit == self.charm.unit + and self.peer_relation.data[self.charm.unit].get("state") == "recovery" + ): + # While in a rollback and the Juju leader unit is the top unit in the upgrade stack, emit the event + # for this unit to start the rollback. + self.peer_relation.data[self.charm.unit].update({"state": "ready"}) + self.on_upgrade_changed(event) + return self.charm.unit.status = WaitingStatus("other units upgrading first...") self.peer_relation.data[self.charm.unit].update({"state": "ready"}) diff --git a/lib/charms/postgresql_k8s/v0/postgresql.py b/lib/charms/postgresql_k8s/v0/postgresql.py index 8783f7681..ffddc6636 100644 --- a/lib/charms/postgresql_k8s/v0/postgresql.py +++ b/lib/charms/postgresql_k8s/v0/postgresql.py @@ -36,7 +36,7 @@ # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 26 +LIBPATCH = 27 INVALID_EXTRA_USER_ROLE_BLOCKING_MESSAGE = "invalid role(s) for extra user roles" @@ -111,20 +111,19 @@ def __init__( self.system_users = system_users def _connect_to_database( - self, database: str = None, connect_to_current_host: bool = False + self, database: str = None, database_host: str = None ) -> psycopg2.extensions.connection: """Creates a connection to the database. Args: database: database to connect to (defaults to the database provided when the object for this class was created). - connect_to_current_host: whether to connect to the current host - instead of the primary host. + database_host: host to connect to instead of the primary host. Returns: psycopg2 connection object. """ - host = self.current_host if connect_to_current_host else self.primary_host + host = database_host if database_host is not None else self.primary_host connection = psycopg2.connect( f"dbname='{database if database else self.database}' user='{self.user}' host='{host}'" f"password='{self.password}' connect_timeout=1" @@ -388,7 +387,7 @@ def get_postgresql_text_search_configs(self) -> Set[str]: Set of PostgreSQL text search configs. """ with self._connect_to_database( - connect_to_current_host=True + database_host=self.current_host ) as connection, connection.cursor() as cursor: cursor.execute("SELECT CONCAT('pg_catalog.', cfgname) FROM pg_ts_config;") text_search_configs = cursor.fetchall() @@ -401,7 +400,7 @@ def get_postgresql_timezones(self) -> Set[str]: Set of PostgreSQL timezones. 
""" with self._connect_to_database( - connect_to_current_host=True + database_host=self.current_host ) as connection, connection.cursor() as cursor: cursor.execute("SELECT name FROM pg_timezone_names;") timezones = cursor.fetchall() @@ -434,7 +433,7 @@ def is_tls_enabled(self, check_current_host: bool = False) -> bool: """ try: with self._connect_to_database( - connect_to_current_host=check_current_host + database_host=self.current_host if check_current_host else None ) as connection, connection.cursor() as cursor: cursor.execute("SHOW ssl;") return "on" in cursor.fetchone()[0] @@ -502,19 +501,24 @@ def set_up_database(self) -> None: if connection is not None: connection.close() - def update_user_password(self, username: str, password: str) -> None: + def update_user_password( + self, username: str, password: str, database_host: str = None + ) -> None: """Update a user password. Args: username: the user to update the password. password: the new password for the user. + database_host: the host to connect to. Raises: PostgreSQLUpdateUserPasswordError if the password couldn't be changed. """ connection = None try: - with self._connect_to_database() as connection, connection.cursor() as cursor: + with self._connect_to_database( + database_host=database_host + ) as connection, connection.cursor() as cursor: cursor.execute( sql.SQL("ALTER USER {} WITH ENCRYPTED PASSWORD '" + password + "';").format( sql.Identifier(username) @@ -610,7 +614,7 @@ def validate_date_style(self, date_style: str) -> bool: """ try: with self._connect_to_database( - connect_to_current_host=True + database_host=self.current_host ) as connection, connection.cursor() as cursor: cursor.execute( sql.SQL( diff --git a/lib/charms/postgresql_k8s/v0/postgresql_tls.py b/lib/charms/postgresql_k8s/v0/postgresql_tls.py index 450752be9..740a607fb 100644 --- a/lib/charms/postgresql_k8s/v0/postgresql_tls.py +++ b/lib/charms/postgresql_k8s/v0/postgresql_tls.py @@ -34,6 +34,7 @@ from ops.charm import ActionEvent, RelationBrokenEvent from ops.framework import Object from ops.pebble import ConnectionError, PathError, ProtocolError +from tenacity import RetryError # The unique Charmhub library identifier, never change it LIBID = "c27af44a92df4ef38d7ae06418b2800f" @@ -43,7 +44,7 @@ # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version. 
-LIBPATCH = 8 +LIBPATCH = 9 logger = logging.getLogger(__name__) SCOPE = "unit" @@ -142,7 +143,7 @@ def _on_certificate_available(self, event: CertificateAvailableEvent) -> None: logger.debug("Cannot push TLS certificates at this moment") event.defer() return - except (ConnectionError, PathError, ProtocolError) as e: + except (ConnectionError, PathError, ProtocolError, RetryError) as e: logger.error("Cannot push TLS certificates: %r", e) event.defer() return diff --git a/src/charm.py b/src/charm.py index a2c3342f5..8633a99a8 100755 --- a/src/charm.py +++ b/src/charm.py @@ -121,8 +121,7 @@ def __init__(self, *args): self.legacy_db_admin_relation = DbProvides(self, admin=True) self.tls = PostgreSQLTLS(self, PEER_RELATION_NAME) - self._cores = max(min(os.cpu_count(), 4), 2) - self.service_ids = list(range(self._cores)) + self.service_ids = list(range(self.instances_count)) self.pgb_services = [ f"{PGB}-{self.app.name}@{service_id}" for service_id in self.service_ids ] @@ -147,10 +146,28 @@ def __init__(self, *args): self, relation_name=TRACING_RELATION_NAME, protocols=[TRACING_PROTOCOL] ) + @property + def instances_count(self): + """Returns the number of pgbouncer instances to run based on the exposed state.""" + if self._is_exposed: + return max(min(os.cpu_count(), 4), 2) + else: + return 1 + # ======================= # Charm Lifecycle Hooks # ======================= + def create_instance_directories(self): + """Create configuration directories for pgbouncer instances.""" + pg_user = pwd.getpwnam(PG_USER) + app_conf_dir = f"{PGB_CONF_DIR}/{self.app.name}" + + # Make a directory for each service to store configs. + for service_id in self.service_ids: + os.makedirs(f"{app_conf_dir}/{INSTANCE_DIR}{service_id}", 0o700, exist_ok=True) + os.chown(f"{app_conf_dir}/{INSTANCE_DIR}{service_id}", pg_user.pw_uid, pg_user.pw_gid) + @property def tracing_endpoint(self) -> Optional[str]: """Otlp http endpoint for charm instrumentation.""" @@ -159,11 +176,6 @@ def render_utility_files(self): """Render charm utility services and configuration.""" - # Initialise pgbouncer.ini config files from defaults set in charm lib and current config. - # We'll add basic configs for now even if this unit isn't a leader, so systemd doesn't - # throw a fit. - self.render_pgb_config() - # Render pgbouncer service file and reload systemd with open("templates/pgbouncer.service.j2", "r") as file: template = Template(file.read()) @@ -214,14 +226,8 @@ def _on_install(self, _) -> None: error_message = "Failed to stop and disable pgbackrest snap service" logger.exception(error_message, exc_info=e) - pg_user = pwd.getpwnam(PG_USER) - app_conf_dir = f"{PGB_CONF_DIR}/{self.app.name}" - - # Make a directory for each service to store configs. - for service_id in self.service_ids: - os.makedirs(f"{app_conf_dir}/{INSTANCE_DIR}{service_id}", 0o700, exist_ok=True) - os.chown(f"{app_conf_dir}/{INSTANCE_DIR}{service_id}", pg_user.pw_uid, pg_user.pw_gid) - + self.create_instance_directories() + self.render_pgb_config() + self.render_utility_files() self.unit.status = WaitingStatus("Waiting to start PgBouncer") @@ -501,7 +507,8 @@ def _on_config_changed(self, event) -> None: restarts pgbouncer to apply changes.
""" old_port = self.peers.app_databag.get("current_port") - if old_port != str(self.config["listen_port"]) and self._is_exposed: + port_changed = old_port != str(self.config["listen_port"]) + if port_changed and self._is_exposed: if self.unit.is_leader(): self.peers.app_databag["current_port"] = str(self.config["listen_port"]) # Open port @@ -515,7 +522,7 @@ def _on_config_changed(self, event) -> None: # TODO hitting upgrade errors here due to secrets labels failing to set on non-leaders. # deferring until the leader manages to set the label try: - self.render_pgb_config(reload_pgbouncer=True) + self.render_pgb_config(reload_pgbouncer=True, restart=port_changed) except ModelError: logger.warning("Deferring on_config_changed: cannot set secret label") event.defer() @@ -548,13 +555,16 @@ def check_pgb_running(self): return True - def reload_pgbouncer(self): + def reload_pgbouncer(self, restart=False): """Restarts systemd pgbouncer service.""" initial_status = self.unit.status self.unit.status = MaintenanceStatus("Reloading Pgbouncer") try: for service in self.pgb_services: - systemd.service_restart(service) + if restart or not systemd.service_running(service): + systemd.service_restart(service) + else: + systemd.service_reload(service) self.unit.status = initial_status except systemd.SystemdError as e: logger.error(e) @@ -683,7 +693,7 @@ def _get_relation_config(self) -> [Dict[str, Dict[str, Union[str, bool]]]]: } return pgb_dbs - def render_pgb_config(self, reload_pgbouncer=False): + def render_pgb_config(self, reload_pgbouncer=False, restart=False): """Derives config files for the number of required services from given config. This method takes a primary config and generates one unique config for each intended @@ -692,6 +702,17 @@ def render_pgb_config(self, reload_pgbouncer=False): initial_status = self.unit.status self.unit.status = MaintenanceStatus("updating PgBouncer config") + # If exposed relation, open the listen port for all units + if self._is_exposed: + # Open port + try: + self.unit.open_port("tcp", self.config["listen_port"]) + except ModelError: + logger.exception("failed to open port") + + self.create_instance_directories() + self.render_utility_files() + # Render primary config. This config is the only copy that the charm reads from to create # PgbConfig objects, and is modified below to implement individual services. 
app_conf_dir = f"{PGB_CONF_DIR}/{self.app.name}" @@ -704,7 +725,7 @@ def render_pgb_config(self, reload_pgbouncer=False): min_pool_size = 10 reserve_pool_size = 10 else: - effective_db_connections = max_db_connections / self._cores + effective_db_connections = max_db_connections / self.instances_count default_pool_size = math.ceil(effective_db_connections / 2) min_pool_size = math.ceil(effective_db_connections / 4) reserve_pool_size = math.ceil(effective_db_connections / 4) @@ -720,37 +741,40 @@ def render_pgb_config(self, reload_pgbouncer=False): # Modify & render config files for each service instance for service_id in self.service_ids: self.unit.status = MaintenanceStatus("updating PgBouncer config") - self.render_file( - f"{app_conf_dir}/{INSTANCE_DIR}{service_id}/pgbouncer.ini", - template.render( - databases=databases, - readonly_databases=readonly_dbs, - peer_id=service_id, - base_socket_dir=f"{app_temp_dir}/{INSTANCE_DIR}", - peers=self.service_ids, - log_file=f"{app_log_dir}/{INSTANCE_DIR}{service_id}/pgbouncer.log", - pid_file=f"{app_temp_dir}/{INSTANCE_DIR}{service_id}/pgbouncer.pid", - listen_addr=addr, - listen_port=self.config["listen_port"], - pool_mode=self.config["pool_mode"], - max_db_connections=max_db_connections, - default_pool_size=default_pool_size, - min_pool_size=min_pool_size, - reserve_pool_size=reserve_pool_size, - stats_user=self.backend.stats_user, - auth_query=self.backend.auth_query, - auth_file=f"{app_conf_dir}/{AUTH_FILE_NAME}", - enable_tls=enable_tls, - key_file=f"{app_conf_dir}/{TLS_KEY_FILE}", - ca_file=f"{app_conf_dir}/{TLS_CA_FILE}", - cert_file=f"{app_conf_dir}/{TLS_CERT_FILE}", - ), - 0o700, - ) + try: + self.render_file( + f"{app_conf_dir}/{INSTANCE_DIR}{service_id}/pgbouncer.ini", + template.render( + databases=databases, + readonly_databases=readonly_dbs, + peer_id=service_id, + base_socket_dir=f"{app_temp_dir}/{INSTANCE_DIR}", + peers=self.service_ids, + log_file=f"{app_log_dir}/{INSTANCE_DIR}{service_id}/pgbouncer.log", + pid_file=f"{app_temp_dir}/{INSTANCE_DIR}{service_id}/pgbouncer.pid", + listen_addr=addr, + listen_port=self.config["listen_port"], + pool_mode=self.config["pool_mode"], + max_db_connections=max_db_connections, + default_pool_size=default_pool_size, + min_pool_size=min_pool_size, + reserve_pool_size=reserve_pool_size, + stats_user=self.backend.stats_user, + auth_query=self.backend.auth_query, + auth_file=f"{app_conf_dir}/{AUTH_FILE_NAME}", + enable_tls=enable_tls, + key_file=f"{app_conf_dir}/{TLS_KEY_FILE}", + ca_file=f"{app_conf_dir}/{TLS_CA_FILE}", + cert_file=f"{app_conf_dir}/{TLS_CERT_FILE}", + ), + 0o700, + ) + except FileNotFoundError: + logger.warning("Service %s not yet rendered" % service_id) self.unit.status = initial_status if reload_pgbouncer: - self.reload_pgbouncer() + self.reload_pgbouncer(restart) def render_prometheus_service(self): """Render a unit file for the prometheus exporter and restarts the service.""" diff --git a/src/constants.py b/src/constants.py index 8f7953630..d91dd3fda 100644 --- a/src/constants.py +++ b/src/constants.py @@ -61,5 +61,7 @@ "ca": "cauth", } +SOCKET_LOCATION = "/tmp/snap-private-tmp/snap.charmed-postgresql/tmp/pgbouncer/instance_0" + TRACING_RELATION_NAME = "tracing" TRACING_PROTOCOL = "otlp_http" diff --git a/src/relations/pgbouncer_provider.py b/src/relations/pgbouncer_provider.py index 2bbf5b24f..a4dec1276 100644 --- a/src/relations/pgbouncer_provider.py +++ b/src/relations/pgbouncer_provider.py @@ -54,7 +54,6 @@ Application, BlockedStatus, MaintenanceStatus, - ModelError, ) 
from constants import CLIENT_RELATION_NAME @@ -111,17 +110,6 @@ def _on_database_requested(self, event: DatabaseRequestedEvent) -> None: event.defer() return - # If exposed relation, open the listen port for all units - if event.external_node_connectivity: - # Open port - try: - self.charm.unit.open_port("tcp", self.charm.config["listen_port"]) - except ModelError: - logger.exception("failed to open port") - - if not self.charm.unit.is_leader(): - return - # Retrieve the database name and extra user roles using the charm library. database = event.database extra_user_roles = event.extra_user_roles or "" diff --git a/src/upgrade.py b/src/upgrade.py index 8043c5dd5..804c6fd52 100644 --- a/src/upgrade.py +++ b/src/upgrade.py @@ -5,6 +5,7 @@ import json import logging +import os from typing import List from charms.data_platform_libs.v0.upgrade import ( @@ -19,7 +20,7 @@ from tenacity import Retrying, stop_after_attempt, wait_fixed from typing_extensions import override -from constants import SNAP_PACKAGES +from constants import PGB, SNAP_PACKAGES DEFAULT_MESSAGE = "Pre-upgrade check failed and cannot safely upgrade" @@ -75,8 +76,10 @@ def _cluster_checks(self) -> None: def _on_upgrade_granted(self, event: UpgradeGrantedEvent) -> None: # Refresh the charmed PostgreSQL snap and restart the database. self.charm.unit.status = MaintenanceStatus("stopping services") - for service in self.charm.pgb_services: - systemd.service_stop(service) + # If pgb is upgraded from a version that only uses cpu count excess services should be stopped + for service in [f"{PGB}-{self.charm.app.name}@{i}" for i in range(os.cpu_count())]: + if systemd.service_running(service): + systemd.service_stop(service) if self.charm.backend.postgres: self.charm.remove_exporter_service() @@ -87,6 +90,7 @@ def _on_upgrade_granted(self, event: UpgradeGrantedEvent) -> None: if self.charm.unit.is_leader(): self.charm.generate_relation_databases() + self.charm.render_pgb_config() self.charm.render_utility_files() self.charm.reload_pgbouncer() if self.charm.backend.postgres: diff --git a/tests/integration/relations/pgbouncer_provider/test_data_integrator.py b/tests/integration/relations/pgbouncer_provider/test_data_integrator.py index c3a4f6e87..e92024daf 100644 --- a/tests/integration/relations/pgbouncer_provider/test_data_integrator.py +++ b/tests/integration/relations/pgbouncer_provider/test_data_integrator.py @@ -9,12 +9,14 @@ from juju.unit import Unit from pytest_operator.plugin import OpsTest -from constants import BACKEND_RELATION_NAME +from constants import BACKEND_RELATION_NAME, PGB_CONF_DIR from ... import architecture from ...helpers.helpers import ( PG, PGB, + get_cfg, + get_unit_cores, ) from ...juju_ import juju_major_version from .helpers import check_exposed_connection @@ -116,3 +118,30 @@ async def test_remove_tls(ops_test: OpsTest, pgb_charm_jammy): ops_test.model.applications[DATA_INTEGRATOR_APP_NAME].units[0] ) check_exposed_connection(credentials, False) + + +@pytest.mark.group(1) +async def test_change_config(ops_test: OpsTest): + """Change config and assert that the pgbouncer config file looks how we expect.""" + async with ops_test.fast_forward(): + unit = ops_test.model.units[f"{PGB}/0"] + await ops_test.model.applications[PGB].set_config({ + "pool_mode": "transaction", + "max_db_connections": "44", + }) + await ops_test.model.wait_for_idle(apps=[PGB], status="active", timeout=600) + + # The config changes depending on the amount of cores on the unit, so get that info. 
+ cores = max(min(await get_unit_cores(ops_test, unit), 4), 2) + + primary_cfg = await get_cfg(ops_test, unit.name) + + assert primary_cfg["pgbouncer"]["pool_mode"] == "transaction" + assert primary_cfg["pgbouncer"]["max_db_connections"] == "44" + + # Validating service config files are correctly written is handled by render_pgb_config and its + # tests, but we need to make sure they at least exist in the right places. + for service_id in range(cores): + path = f"{PGB_CONF_DIR}/{PGB}/instance_{service_id}/pgbouncer.ini" + service_cfg = await get_cfg(ops_test, unit.name, path=path) + assert service_cfg != f"cat: {path}: No such file or directory" diff --git a/tests/integration/test_charm.py b/tests/integration/test_charm.py index a42dc38d4..3426d9874 100644 --- a/tests/integration/test_charm.py +++ b/tests/integration/test_charm.py @@ -18,7 +18,6 @@ WAIT_MSG, get_cfg, get_running_instances, - get_unit_cores, ) from .helpers.postgresql_helpers import restart_machine @@ -72,7 +71,7 @@ async def test_change_config(ops_test: OpsTest): await ops_test.model.wait_for_idle(apps=[PGB], status="active", timeout=600) # The config changes depending on the amount of cores on the unit, so get that info. - cores = max(min(await get_unit_cores(ops_test, unit), 4), 2) + cores = 1 primary_cfg = await get_cfg(ops_test, unit.name) @@ -90,10 +89,9 @@ @pytest.mark.group(1) async def test_systemd_restarts_pgbouncer_processes(ops_test: OpsTest): unit = ops_test.model.units[f"{PGB}/0"] - expected_processes = max(min(await get_unit_cores(ops_test, unit), 4), 2) # verify the correct amount of pgbouncer processes are running - assert await get_running_instances(ops_test, unit, "pgbouncer") == expected_processes + assert await get_running_instances(ops_test, unit, "pgbouncer") == 1 # Kill pgbouncer process and wait for it to restart await unit.run("pkill -SIGINT -x pgbouncer") @@ -101,7 +99,7 @@ await ops_test.model.wait_for_idle(apps=[PGB], status="active", timeout=(3 * 60)) # verify all processes start again - assert await get_running_instances(ops_test, unit, "pgbouncer") == expected_processes + assert await get_running_instances(ops_test, unit, "pgbouncer") == 1 @pytest.mark.group(1) diff --git a/tests/unit/relations/test_pgbouncer_provider.py b/tests/unit/relations/test_pgbouncer_provider.py index 5a73983db..f930e5693 100644 --- a/tests/unit/relations/test_pgbouncer_provider.py +++ b/tests/unit/relations/test_pgbouncer_provider.py @@ -7,7 +7,11 @@ from ops.testing import Harness from charm import PgBouncerCharm -from constants import BACKEND_RELATION_NAME, CLIENT_RELATION_NAME, PEER_RELATION_NAME +from constants import ( + BACKEND_RELATION_NAME, + CLIENT_RELATION_NAME, + PEER_RELATION_NAME, +) from ..helpers import patch_network_get @@ -88,6 +92,7 @@ def test_on_database_requested( rel_id = event.relation.id = 1 database = event.database = "test-db" event.extra_user_roles = "SUPERUSER" + event.external_node_connectivity = False user = f"relation_id_{rel_id}" # check we exit immediately if backend doesn't exist.
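The exposed data-integrator test above expects a per-core instance count (clamped to 2-4), while the plain test_charm deployment below expects a single instance. Both expectations follow from the charm's new instances_count property. A minimal standalone sketch of that logic, with instances_count rewritten here as a plain function for illustration (the real property reads self._is_exposed instead of taking an argument):

import os

def instances_count(is_exposed: bool) -> int:
    """Number of pgbouncer services to run (mirrors the charm's new property)."""
    if is_exposed:
        # Exposed deployments keep the previous scaling: one service per core,
        # clamped to the range [2, 4].
        return max(min(os.cpu_count(), 4), 2)
    # Unexposed deployments now run a single pgbouncer instance.
    return 1

# e.g. on an 8-core host: instances_count(True) == 4, instances_count(False) == 1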
diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index 65866cd73..6cea843c3 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -3,7 +3,6 @@ import logging import math -import os import platform import unittest from unittest.mock import MagicMock, Mock, PropertyMock, call, patch @@ -111,20 +110,20 @@ def test_on_install( return_value=None, ) def test_on_start(self, _has_relation, _start, _render_prom_service, _, __): - intended_instances = max(min(os.cpu_count(), 4), 2) # Testing charm blocks when systemd is in error self.charm.on.start.emit() # Charm should fail out after calling _start once _start.assert_called_once() self.assertIsInstance(self.harness.model.unit.status, BlockedStatus) + _start.reset_mock() # Testing charm starts the correct amount of pgbouncer instances but enters BlockedStatus # because the backend relation doesn't exist yet. _start.side_effect = None self.charm.on.start.emit() - calls = [call(f"pgbouncer-pgbouncer@{instance}") for instance in range(intended_instances)] - _start.assert_has_calls(calls) + _start.assert_called_once_with("pgbouncer-pgbouncer@0") self.assertIsInstance(self.harness.model.unit.status, BlockedStatus) + _start.reset_mock() # Testing charm starts the correct amount of pgbouncer instances and enters activestatus if # everything's working fine. @@ -132,18 +131,30 @@ def test_on_start(self, _has_relation, _start, _render_prom_service, _, __): _start.side_effect = None _has_relation.return_value = True self.charm.on.start.emit() - calls = [call(f"pgbouncer-pgbouncer@{instance}") for instance in range(intended_instances)] - _start.assert_has_calls(calls) + _start.assert_called_once_with("pgbouncer-pgbouncer@0") _render_prom_service.assert_called_once_with() self.assertIsInstance(self.harness.model.unit.status, ActiveStatus) + _start.reset_mock() + @patch("charms.operator_libs_linux.v1.systemd.service_running") + @patch("charms.operator_libs_linux.v1.systemd.service_reload") @patch("charms.operator_libs_linux.v1.systemd.service_restart") @patch("charm.PgBouncerCharm.check_pgb_running") - def test_reload_pgbouncer(self, _check_pgb_running, _restart): - intended_instances = max(min(os.cpu_count(), 4), 2) + def test_reload_pgbouncer(self, _check_pgb_running, _restart, _reload, _running): + # Reloads if the service is running + self.charm.reload_pgbouncer() + _reload.assert_called_once_with("pgbouncer-pgbouncer@0") + assert not _restart.called + _check_pgb_running.assert_called_once() + _restart.reset_mock() + _reload.reset_mock() + _check_pgb_running.reset_mock() + + # Restarts if service is not running + _running.return_value = False self.charm.reload_pgbouncer() - calls = [call(f"pgbouncer-pgbouncer@{instance}") for instance in range(intended_instances)] - _restart.assert_has_calls(calls) + _restart.assert_called_once_with("pgbouncer-pgbouncer@0") + assert not _reload.called _check_pgb_running.assert_called_once() # Verify that if systemd is in error, the charm enters blocked status. 
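# A minimal standalone sketch of the reload-versus-restart decision that the
# reworked test_reload_pgbouncer above exercises. `services` stands in for
# self.pgb_services; the systemd helpers are the charms.operator_libs_linux.v1
# functions the charm already imports. reload_or_restart is an illustrative
# name, not a helper in the charm itself.
from typing import List

from charms.operator_libs_linux.v1 import systemd

def reload_or_restart(services: List[str], restart: bool = False) -> None:
    for service in services:
        if restart or not systemd.service_running(service):
            # Restart when explicitly requested (e.g. the listen port changed)
            # or when the service is not running yet.
            systemd.service_restart(service)
        else:
            # Otherwise reload, which re-reads the config without stopping the
            # running pgbouncer process.
            systemd.service_reload(service)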
@@ -176,14 +187,11 @@ def test_check_pgb_running(self, _postgres_ready, _running): assert not self.charm.check_pgb_running() self.assertIsInstance(self.charm.unit.status, BlockedStatus) _running.side_effect = None + _running.reset_mock() # otherwise check all services and return activestatus - intended_instances = max(min(os.cpu_count(), 4), 2) assert self.charm.check_pgb_running() - calls = [ - call(f"pgbouncer-pgbouncer@{instance}") for instance in range(0, intended_instances) - ] - _running.assert_has_calls(calls) + _running.assert_any_call("pgbouncer-pgbouncer@0") @patch("charm.PgBouncerCharm.render_pgb_config") @patch("relations.peers.Peers.app_databag", new_callable=PropertyMock) @@ -203,7 +211,7 @@ def test_on_config_changed(self, _app_databag, _render): }) # _read.return_value is modified on config update, but the object reference is the same. - _render.assert_called_with(reload_pgbouncer=True) + _render.assert_called_with(reload_pgbouncer=True, restart=True) @patch("charm.snap.SnapCache") def test_install_snap_packages(self, _snap_cache): @@ -339,7 +347,7 @@ def test_render_pgb_config( template = Template(file.read()) self.charm.render_pgb_config(reload_pgbouncer=True) _reload.assert_called() - effective_db_connections = 100 / self.charm._cores + effective_db_connections = 100 default_pool_size = math.ceil(effective_db_connections / 2) min_pool_size = math.ceil(effective_db_connections / 4) reserve_pool_size = math.ceil(effective_db_connections / 4) @@ -371,30 +379,29 @@ def test_render_pgb_config( "auth_user": "pgbouncer_auth_BACKNEND_USER", }, } - for i in range(self.charm._cores): - expected_content = template.render( - databases=expected_databases, - readonly_databases={}, - peer_id=i, - peers=range(self.charm._cores), - base_socket_dir="/tmp/pgbouncer/instance_", - log_file=f"{PGB_LOG_DIR}/pgbouncer/instance_{i}/pgbouncer.log", - pid_file=f"/tmp/pgbouncer/instance_{i}/pgbouncer.pid", - listen_addr="127.0.0.1", - listen_port=6432, - pool_mode="session", - max_db_connections=100, - default_pool_size=default_pool_size, - min_pool_size=min_pool_size, - reserve_pool_size=reserve_pool_size, - stats_user="pgbouncer_stats_pgbouncer", - auth_query="SELECT username, password FROM pgbouncer_auth_BACKNEND_USER.get_auth($1)", - auth_file=auth_file, - enable_tls=False, - ) - _render.assert_any_call( - f"{PGB_CONF_DIR}/pgbouncer/instance_{i}/pgbouncer.ini", expected_content, 0o700 - ) + expected_content = template.render( + databases=expected_databases, + readonly_databases={}, + peer_id=0, + peers=range(1), + base_socket_dir="/tmp/pgbouncer/instance_", + log_file=f"{PGB_LOG_DIR}/pgbouncer/instance_0/pgbouncer.log", + pid_file="/tmp/pgbouncer/instance_0/pgbouncer.pid", + listen_addr="127.0.0.1", + listen_port=6432, + pool_mode="session", + max_db_connections=100, + default_pool_size=default_pool_size, + min_pool_size=min_pool_size, + reserve_pool_size=reserve_pool_size, + stats_user="pgbouncer_stats_pgbouncer", + auth_query="SELECT username, password FROM pgbouncer_auth_BACKNEND_USER.get_auth($1)", + auth_file=auth_file, + enable_tls=False, + ) + _render.assert_called_once_with( + f"{PGB_CONF_DIR}/pgbouncer/instance_0/pgbouncer.ini", expected_content, 0o700 + ) _render.reset_mock() _reload.reset_mock() @@ -418,30 +425,29 @@ def test_render_pgb_config( self.charm.render_pgb_config() assert not _reload.called - for i in range(self.charm._cores): - expected_content = template.render( - databases=expected_databases, - readonly_databases={}, - peer_id=i, - peers=range(self.charm._cores), - 
base_socket_dir="/tmp/pgbouncer/instance_", - log_file=f"{PGB_LOG_DIR}/pgbouncer/instance_{i}/pgbouncer.log", - pid_file=f"/tmp/pgbouncer/instance_{i}/pgbouncer.pid", - listen_addr="127.0.0.1", - listen_port=6432, - pool_mode="session", - max_db_connections=0, - default_pool_size=20, - min_pool_size=10, - reserve_pool_size=10, - stats_user="pgbouncer_stats_pgbouncer", - auth_query="SELECT username, password FROM pgbouncer_auth_BACKNEND_USER.get_auth($1)", - auth_file=auth_file, - enable_tls=False, - ) - _render.assert_any_call( - f"{PGB_CONF_DIR}/pgbouncer/instance_{i}/pgbouncer.ini", expected_content, 0o700 - ) + expected_content = template.render( + databases=expected_databases, + readonly_databases={}, + peer_id=0, + peers=range(1), + base_socket_dir="/tmp/pgbouncer/instance_", + log_file=f"{PGB_LOG_DIR}/pgbouncer/instance_0/pgbouncer.log", + pid_file="/tmp/pgbouncer/instance_0/pgbouncer.pid", + listen_addr="127.0.0.1", + listen_port=6432, + pool_mode="session", + max_db_connections=0, + default_pool_size=20, + min_pool_size=10, + reserve_pool_size=10, + stats_user="pgbouncer_stats_pgbouncer", + auth_query="SELECT username, password FROM pgbouncer_auth_BACKNEND_USER.get_auth($1)", + auth_file=auth_file, + enable_tls=False, + ) + _render.assert_called_once_with( + f"{PGB_CONF_DIR}/pgbouncer/instance_0/pgbouncer.ini", expected_content, 0o700 + ) @patch("charm.Peers.app_databag", new_callable=PropertyMock, return_value={}) @patch("charm.PgBouncerCharm.get_secret") @@ -484,6 +490,41 @@ def test_generate_relation_databases_not_leader(self, _): assert self.charm.generate_relation_databases() == {} + @patch("charm.PgBouncerCharm._collect_readonly_dbs") + @patch("charm.PgBouncerCharm.update_status") + @patch("charm.Peers.update_leader") + @patch_network_get(private_address="1.1.1.1") + def test_on_update_status(self, _update_leader, _update_status, _collect_readonly_dbs): + event = Mock() + + self.charm._on_update_status(event) + + _update_leader.assert_called_once_with() + _update_status.assert_called_once_with() + _collect_readonly_dbs.assert_called_once_with() + + @patch("charm.BackendDatabaseRequires.postgres") + @patch( + "charm.PgBouncerCharm.get_relation_databases", return_value={"1": {"name": "excludeddb"}} + ) + @patch_network_get(private_address="1.1.1.1") + def test_collect_readonly_dbs(self, _get_relation_databases, _postgres): + _postgres._connect_to_database().__enter__().cursor().__enter__().fetchall.return_value = ( + ("includeddb",), + ("excludeddb",), + ) + + # don't collect if not leader + self.charm._collect_readonly_dbs() + assert "readonly_dbs" not in self.charm.peers.app_databag + + with self.harness.hooks_disabled(): + self.harness.set_leader() + + self.charm._collect_readonly_dbs() + + assert self.charm.peers.app_databag["readonly_dbs"] == '["includeddb"]' + # # Secrets # diff --git a/tests/unit/test_upgrade.py b/tests/unit/test_upgrade.py index 7e160531a..916f95004 100644 --- a/tests/unit/test_upgrade.py +++ b/tests/unit/test_upgrade.py @@ -1,5 +1,6 @@ # Copyright 2023 Canonical Ltd. # See LICENSE file for licensing details. +import os import unittest from unittest.mock import Mock, PropertyMock, patch @@ -72,7 +73,7 @@ def test_on_upgrade_granted( self.charm.upgrade._on_upgrade_granted(event) - assert _systemd.service_stop.call_count == len(self.charm.pgb_services) + assert _systemd.service_stop.call_count == os.cpu_count() for svc in self.charm.pgb_services: _systemd.service_stop.assert_any_call(svc) _remove_exporter_service.assert_called_once_with()
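The updated test_on_upgrade_granted expectation above (service_stop called os.cpu_count() times rather than once per current service) reflects how the upgrade handler now cleans up services left over from revisions that scaled per CPU core. A minimal sketch of that clean-up step, assuming PGB = "pgbouncer" as in src/constants.py; stop_legacy_services is an illustrative name rather than a real charm helper:

import os

from charms.operator_libs_linux.v1 import systemd

PGB = "pgbouncer"

def stop_legacy_services(app_name: str) -> None:
    # A unit upgraded from an older revision may still have up to
    # os.cpu_count() pgbouncer-<app>@<n> services, so probe every candidate
    # name and stop only the ones that are actually running.
    for service in (f"{PGB}-{app_name}@{i}" for i in range(os.cpu_count())):
        if systemd.service_running(service):
            systemd.service_stop(service)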