Commit

Improve logging
Signed-off-by: Tobias Wolf <[email protected]>
NotTheEvilOne committed Sep 3, 2024
1 parent 2db446b commit 87c8076
Showing 8 changed files with 72 additions and 58 deletions.
20 changes: 12 additions & 8 deletions src/rookify/modules/migrate_mds/main.py
@@ -25,7 +25,7 @@ def preflight(self) -> None:
for mds_host, mds_daemons in state_data["node"]["ls"]["mds"].items():
if len(mds_daemons) > 1:
raise ModuleException(
"There are more than 1 Ceph MDS daemons running on host {0}".format(
"There are more than 1 ceph-mds daemons running on host {0}".format(
mds_host
)
)
@@ -51,7 +51,7 @@ def execute(self) -> None:
if mds_host in migrated_mds:
continue

self.logger.debug("Migrating Ceph MDS daemon '{0}'".format(mds_host))
self.logger.info("Migrating ceph-mds daemon at host '{0}'".format(mds_host))

if (
is_migration_required
@@ -88,13 +88,13 @@ def _disable_mds(self, mds_host: str) -> None:

if result.failed:
raise ModuleException(
"Disabling original Ceph MDS daemon at host {0} failed: {1}".format(
"Disabling original ceph-mds daemon at host {0} failed: {1}".format(
mds_host, result.stderr
)
)

self.logger.debug(
"Waiting for disabled original Ceph MDS daemon '{0}' to disconnect".format(
"Waiting for disabled original ceph-mds daemon at host '{0}' to disconnect".format(
mds_host
)
)
@@ -107,7 +107,7 @@ def _disable_mds(self, mds_host: str) -> None:

sleep(2)

self.logger.info("Disabled Ceph MDS daemon '{0}'".format(mds_host))
self.logger.info("Disabled ceph-mds daemon at host '{0}'".format(mds_host))

def _set_mds_label(self, mds_host: str) -> None:
node_patch = {"metadata": {"labels": {self.k8s.mds_placement_label: "true"}}}
@@ -117,12 +117,14 @@ def _set_mds_label(self, mds_host: str) -> None:
not in self.k8s.core_v1_api.patch_node(mds_host, node_patch).metadata.labels
):
raise ModuleException(
"Failed to patch k8s node for Ceph MDS daemon '{0}'".format(mds_host)
"Failed to patch k8s for ceph-mds daemon node '{0}'".format(mds_host)
)

def _enable_rook_based_mds(self, mds_host: str) -> None:
self.logger.debug(
"Enabling and waiting for Rook based MDS daemon '{0}'".format(mds_host)
"Enabling and waiting for Rook based ceph-mds daemon node '{0}'".format(
mds_host
)
)

while True:
@@ -133,7 +135,9 @@ def _enable_rook_based_mds(self, mds_host: str) -> None:

sleep(2)

self.logger.debug("Rook based MDS daemon '{0}' available".format(mds_host))
self.logger.info(
"Rook based ceph-mds daemon node '{0}' available".format(mds_host)
)

@staticmethod
def register_execution_state(
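The hunks above consistently promote operator-facing milestones to logger.info while keeping internal progress at logger.debug. A minimal, self-contained sketch of that convention with the standard logging module; the migrate_mds_host helper and the host name are illustrative, not part of the commit:

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("rookify")

def migrate_mds_host(mds_host: str) -> None:
    # info: operator-facing milestones that should always be visible
    logger.info("Migrating ceph-mds daemon at host '{0}'".format(mds_host))

    # debug: internal progress that only shows up at increased verbosity
    logger.debug(
        "Waiting for disabled original ceph-mds daemon at host '{0}' to disconnect".format(
            mds_host
        )
    )

    logger.info("Disabled ceph-mds daemon at host '{0}'".format(mds_host))

migrate_mds_host("storage-node-0")
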
6 changes: 3 additions & 3 deletions src/rookify/modules/migrate_mds_pools/main.py
@@ -22,7 +22,7 @@ def preflight(self) -> None:
for mds_fs_data in state_data["fs"]["ls"]:
if not mds_fs_data["metadata_pool"].endswith("-metadata"):
self.logger.warn(
"MDS filesystem '{0}' uses an incompatible Ceph pool metadata name '{1}' and can not be migrated to Rook automatically".format(
"ceph-mds filesystem '{0}' uses an incompatible pool metadata name '{1}' and can not be migrated to Rook automatically".format(
mds_fs_data["name"], mds_fs_data["metadata_pool"]
)
)
@@ -84,7 +84,7 @@ def _migrate_pool(self, pool: Dict[str, Any]) -> None:

state_data = self.machine.get_preflight_state("AnalyzeCephHandler").data

self.logger.debug("Migrating Ceph MDS pool '{0}'".format(pool["name"]))
self.logger.info("Migrating ceph-mds pool '{0}'".format(pool["name"]))
osd_pool_configurations = pool["osd_pool_configurations"]

pool_metadata_osd_configuration = osd_pool_configurations[pool["metadata"]]
@@ -142,7 +142,7 @@ def _migrate_pool(self, pool: Dict[str, Any]) -> None:
"MigrateMdsPoolsHandler"
).migrated_pools = migrated_pools

self.logger.info("Migrated Ceph MDS pool '{0}'".format(pool["name"]))
self.logger.info("Migrated ceph-mds pool '{0}'".format(pool["name"]))

@staticmethod
def register_execution_state(
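One unchanged context line above still calls self.logger.warn(...). In the Python standard library, Logger.warn is a deprecated alias for Logger.warning, so the latter spelling is generally preferred for new log calls. A small standalone example with illustrative values:

import logging

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("rookify")

# Logger.warn is a deprecated alias kept for backwards compatibility;
# Logger.warning is the supported spelling.
logger.warning(
    "ceph-mds filesystem '{0}' uses an incompatible pool metadata name '{1}'".format(
        "example-fs", "example-pool"  # illustrative values only
    )
)
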
16 changes: 8 additions & 8 deletions src/rookify/modules/migrate_mgrs/main.py
@@ -32,21 +32,21 @@ def _migrate_mgr(self, mgr_host: str) -> None:
if mgr_host in migrated_mgrs:
return

self.logger.debug("Migrating Ceph mgr daemon '{0}'".format(mgr_host))
self.logger.info("Migrating ceph-mgr daemon at host'{0}'".format(mgr_host))

result = self.ssh.command(
mgr_host, "sudo systemctl disable --now ceph-mgr.target"
)

if result.failed:
raise ModuleException(
"Disabling original Ceph mgr daemon at host {0} failed: {1}".format(
"Disabling original ceph-mgr daemon at host {0} failed: {1}".format(
mgr_host, result.stderr
)
)

self.logger.debug(
"Waiting for disabled original Ceph mgr daemon '{0}' to disconnect".format(
"Waiting for disabled original ceph-mgr daemon '{0}' to disconnect".format(
mgr_host
)
)
@@ -60,7 +60,7 @@ def _migrate_mgr(self, mgr_host: str) -> None:
sleep(2)

self.logger.info(
"Disabled Ceph mgr daemon '{0}' and enabling Rook based Ceph mgr daemon '{0}'".format(
"Disabled ceph-mgr daemon '{0}' and enabling Rook based daemon".format(
mgr_host
)
)
@@ -72,7 +72,7 @@ def _migrate_mgr(self, mgr_host: str) -> None:
not in self.k8s.core_v1_api.patch_node(mgr_host, node_patch).metadata.labels
):
raise ModuleException(
"Failed to patch k8s node for Ceph mgr daemon '{0}'".format(mgr_host)
"Failed to patch k8s for ceph-mgr daemon node '{0}'".format(mgr_host)
)

migrated_mgrs.append(mgr_host)
@@ -86,7 +86,7 @@ def _migrate_mgr(self, mgr_host: str) -> None:
)

self.logger.debug(
"Waiting for {0:d} Ceph mgr daemons to be available".format(
"Waiting for {0:d} ceph-mgr daemons to be available".format(
mgr_count_expected
)
)
@@ -98,8 +98,8 @@ def _migrate_mgr(self, mgr_host: str) -> None:

sleep(2)

- self.logger.debug(
-     "{0:d} Ceph mgr daemons are available".format(mgr_count_expected)
+ self.logger.info(
+     "{0:d} ceph-mgr daemons are available".format(mgr_count_expected)
)

@staticmethod
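Taken together, these hunks wrap a disable-then-poll sequence in before/after log lines. A rough, self-contained sketch of that flow; ssh_command and is_mgr_connected are placeholder stand-ins for the module's SSH and Ceph wrappers, not the project's real API:

import logging
from dataclasses import dataclass
from time import sleep

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("rookify")

@dataclass
class CommandResult:
    failed: bool
    stderr: str = ""

def ssh_command(host: str, command: str) -> CommandResult:
    # Placeholder for the module's SSH wrapper; pretends the command succeeded.
    return CommandResult(failed=False)

def is_mgr_connected(host: str) -> bool:
    # Placeholder for a Ceph status check; reports the daemon as already gone.
    return False

def disable_mgr(mgr_host: str) -> None:
    result = ssh_command(mgr_host, "sudo systemctl disable --now ceph-mgr.target")

    if result.failed:
        raise RuntimeError(
            "Disabling original ceph-mgr daemon at host {0} failed: {1}".format(
                mgr_host, result.stderr
            )
        )

    logger.debug(
        "Waiting for disabled original ceph-mgr daemon '{0}' to disconnect".format(mgr_host)
    )

    while is_mgr_connected(mgr_host):
        sleep(2)

    logger.info("Disabled ceph-mgr daemon at host '{0}'".format(mgr_host))

disable_mgr("storage-node-0")
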
22 changes: 12 additions & 10 deletions src/rookify/modules/migrate_mons/main.py
@@ -39,21 +39,21 @@ def _migrate_mon(self, mon: Dict[str, Any]) -> None:
if mon["name"] in migrated_mons:
return

self.logger.debug("Migrating Ceph mon daemon '{0}'".format(mon["name"]))
self.logger.info("Migrating ceph-mon daemon '{0}'".format(mon["name"]))

result = self.ssh.command(
mon["name"], "sudo systemctl disable --now ceph-mon.target"
)

if result.failed:
raise ModuleException(
"Disabling original Ceph mon daemon at host {0} failed: {1}".format(
"Disabling original ceph-mon daemon {0} failed: {1}".format(
mon["name"], result.stderr
)
)

self.logger.debug(
"Waiting for disabled original Ceph mon daemon '{0}' to disconnect".format(
"Waiting for disabled original ceph-mon daemon '{0}' to disconnect".format(
mon["name"]
)
)
@@ -69,12 +69,12 @@ def _migrate_mon(self, mon: Dict[str, Any]) -> None:

sleep(2)

self.logger.info("Disabled Ceph mon daemon '{0}'".format(mon["name"]))
self.logger.info("Disabled ceph-mon daemon '{0}'".format(mon["name"]))

self.ceph.mon_command("mon remove", name=mon["name"])

- self.logger.info(
-     "Enabling Rook based Ceph mon daemon '{0}'".format(mon["name"])
+ self.logger.debug(
+     "Enabling Rook based ceph-mon daemon at node '{0}'".format(mon["name"])
)

node_patch = {"metadata": {"labels": {self.k8s.mon_placement_label: "true"}}}
@@ -86,7 +86,9 @@ def _migrate_mon(self, mon: Dict[str, Any]) -> None:
).metadata.labels
):
raise ModuleException(
"Failed to patch k8s node for Ceph mon daemon '{0}'".format(mon["name"])
"Failed to patch k8s for ceph-mon daemon at node '{0}'".format(
mon["name"]
)
)

migrated_mons.append(mon["name"])
@@ -100,7 +102,7 @@ def _migrate_mon(self, mon: Dict[str, Any]) -> None:
)

self.logger.debug(
"Waiting for a quorum of {0:d} Ceph mon daemons".format(mon_count_expected)
"Waiting for a quorum of {0:d} ceph-mon daemons".format(mon_count_expected)
)

while True:
@@ -110,8 +112,8 @@ def _migrate_mon(self, mon: Dict[str, Any]) -> None:

sleep(2)

- self.logger.debug(
-     "Quorum of {0:d} Ceph mon daemons successful".format(mon_count_expected)
+ self.logger.info(
+     "Quorum of {0:d} ceph-mon daemons successful".format(mon_count_expected)
)

@staticmethod
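Several of the reworded messages use the '{0:d}' placeholder, which str.format only accepts for integer arguments; a quick standalone illustration (the values are made up):

mon_count_expected = 3

# "{0:d}" formats integers.
print("Waiting for a quorum of {0:d} ceph-mon daemons".format(mon_count_expected))

# Passing a non-integer (for example a string) raises ValueError.
try:
    print("Quorum of {0:d} ceph-mon daemons successful".format("3"))
except ValueError as exc:
    print("format failed:", exc)
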
4 changes: 2 additions & 2 deletions src/rookify/modules/migrate_osd_pools/main.py
@@ -49,7 +49,7 @@ def _migrate_pool(self, pool: Dict[str, Any]) -> None:
if pool["pool_name"] in migrated_pools:
return

self.logger.debug("Migrating Ceph OSD pool '{0}'".format(pool["pool_name"]))
self.logger.info("Migrating ceph-osd pool '{0}'".format(pool["pool_name"]))

pool_definition_values = {
"cluster_namespace": self._config["rook"]["cluster"]["namespace"],
@@ -75,7 +75,7 @@ def _migrate_pool(self, pool: Dict[str, Any]) -> None:
"MigrateOSDPoolsHandler"
).migrated_pools = migrated_pools

self.logger.info("Migrated Ceph OSD pool '{0}'".format(pool["pool_name"]))
self.logger.info("Migrated ceph-osd pool '{0}'".format(pool["pool_name"]))

@staticmethod
def register_execution_state(
28 changes: 17 additions & 11 deletions src/rookify/modules/migrate_osds/main.py
@@ -42,6 +42,10 @@ def _get_devices_of_hosts(self) -> Dict[str, Dict[str, str]]:
if osd_id not in osd_devices[osd_host]:
osd_devices[osd_host][osd_id] = osd_device_path

+ self.logger.debug(
+     "Analyzed {0:d} Ceph OSD(s) on host '{1}'".format(len(osds), osd_host)
+ )
+
return osd_devices

def _get_nodes_osd_devices(self, osd_ids: List[str]) -> List[Dict[str, Any]]:
@@ -93,7 +97,7 @@ def migrate_osds(self, host: str, osd_ids: List[int]) -> None:
"MigrateOSDsHandler", "migrated_osd_ids", default_value=[]
)

self.logger.info("Migrating Ceph OSD host '{0}'".format(host))
self.logger.info("Migrating ceph-osd host '{0}'".format(host))

node_patch = {"metadata": {"labels": {self.k8s.osd_placement_label: "true"}}}

@@ -102,15 +106,15 @@ def migrate_osds(self, host: str, osd_ids: List[int]) -> None:
not in self.k8s.core_v1_api.patch_node(host, node_patch).metadata.labels
):
raise ModuleException(
"Failed to patch k8s node for Ceph OSD host '{0}'".format(host)
"Failed to patch k8s for ceph-osd node '{0}'".format(host)
)

for osd_id in osd_ids:
if osd_id in migrated_osd_ids:
return

- self.logger.info(
-     "Migrating Ceph OSD daemon '{0}@{1:d}'".format(host, osd_id)
+ self.logger.debug(
+     "Migrating ceph-osd daemon '{0}@{1:d}'".format(host, osd_id)
)

result = self.ssh.command(
@@ -120,13 +124,13 @@ def migrate_osds(self, host: str, osd_ids: List[int]) -> None:

if result.failed:
raise ModuleException(
"Disabling original Ceph OSD daemon '{0}@{1:d}' failed: {2}".format(
"Disabling original ceph-osd daemon '{0}@{1:d}' failed: {2}".format(
host, osd_id, result.stderr
)
)

self.logger.debug(
"Waiting for disabled original Ceph OSD daemon '{0}@{1:d}' to disconnect".format(
"Waiting for disabled original ceph-osd daemon '{0}@{1:d}' to disconnect".format(
host, osd_id
)
)
@@ -140,10 +144,10 @@ def migrate_osds(self, host: str, osd_ids: List[int]) -> None:
sleep(2)

self.logger.info(
"Disabled Ceph OSD daemon '{0}@{1:d}'".format(host, osd_id)
"Disabled ceph-osd daemon '{0}@{1:d}'".format(host, osd_id)
)

self.logger.info("Enabling Rook based Ceph OSD host '{0}'".format(host))
self.logger.info("Enabling Rook based ceph-osd node '{0}'".format(host))

nodes_osd_devices = self._get_nodes_osd_devices(migrated_osd_ids + osd_ids)

@@ -170,7 +174,9 @@ def migrate_osds(self, host: str, osd_ids: List[int]) -> None:

for osd_id in osd_ids:
self.logger.debug(
"Waiting for Rook based OSD daemon '{0}@{1:d}'".format(host, osd_id)
"Waiting for Rook based ceph-osd daemon '{0}@{1:d}'".format(
host, osd_id
)
)

while True:
@@ -181,8 +187,8 @@ def migrate_osds(self, host: str, osd_ids: List[int]) -> None:

sleep(2)

- self.logger.debug(
-     "Rook based OSD daemon '{0}@{1:d}' available".format(host, osd_id)
+ self.logger.info(
+     "Rook based ceph-osd daemon '{0}@{1:d}' available".format(host, osd_id)
)

@staticmethod
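The error paths in this file check the labels returned by CoreV1Api.patch_node. A rough sketch of that node-labelling step with the official kubernetes Python client; the kubeconfig, label key, and node name are assumptions for illustration, not values from the commit:

from kubernetes import client, config

# Assumes a reachable cluster and a valid kubeconfig.
config.load_kube_config()
core_v1 = client.CoreV1Api()

osd_placement_label = "placement-osd"  # illustrative label key
host = "storage-node-0"                # illustrative node name

node_patch = {"metadata": {"labels": {osd_placement_label: "true"}}}
patched_node = core_v1.patch_node(host, node_patch)

# Mirror the diff's sanity check: fail loudly if the label did not stick.
if osd_placement_label not in (patched_node.metadata.labels or {}):
    raise RuntimeError("Failed to patch k8s for ceph-osd node '{0}'".format(host))
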
6 changes: 3 additions & 3 deletions src/rookify/modules/migrate_rgw_pools/main.py
@@ -59,7 +59,7 @@ def preflight(self) -> None:

if zone["osd_pools"][metadata_name].get("erasure_code_profile", "") != "":
raise ModuleException(
"Ceph RGW metadata OSD pools must use replication for Rook"
"ceph-rgw metadata OSD pools must use replication for Rook"
)

self.machine.get_preflight_state("MigrateRgwPoolsHandler").zones = zones
@@ -81,7 +81,7 @@ def _migrate_zone(self, zone_name: str, zone_data: Dict[str, Any]) -> None:
"MigrateRgwPoolsHandler", "migrated_pools", default_value=[]
)

self.logger.debug("Migrating Ceph RGW zone '{0}'".format(zone_name))
self.logger.info("Migrating ceph-rgw zone '{0}'".format(zone_name))

osd_pools = zone_data["osd_pools"]

@@ -129,7 +129,7 @@ def _migrate_zone(self, zone_name: str, zone_data: Dict[str, Any]) -> None:
"MigrateRgwPoolsHandler"
).migrated_pools = migrated_pools

self.logger.info("Migrated Ceph RGW zone '{0}'".format(zone_name))
self.logger.info("Migrated ceph-rgw zone '{0}'".format(zone_name))

@staticmethod
def register_execution_state(
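The preflight hunk above rejects erasure-coded RGW metadata pools. A condensed, self-contained sketch of that kind of check; the ModuleException stand-in and the zone data are illustrative, and the real module only inspects the zone's metadata pools rather than every pool:

class ModuleException(Exception):
    """Stand-in for the project's exception type."""

# Illustrative zone data; only the erasure_code_profile field matters here.
zone = {
    "osd_pools": {
        "example-zone.rgw.meta": {"erasure_code_profile": "default"},
        "example-zone.rgw.log": {"erasure_code_profile": ""},
    }
}

try:
    for pool_name, pool in zone["osd_pools"].items():
        if pool.get("erasure_code_profile", "") != "":
            raise ModuleException(
                "ceph-rgw metadata OSD pools must use replication for Rook"
            )
except ModuleException as exc:
    print("preflight failed:", exc)
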
(Diff for 1 more changed file in this commit is not shown here.)
