Commit 1daca19 (1 parent: 7cdd5e1)
Add support to migrate Ceph OSD pools
Signed-off-by: Tobias Wolf <[email protected]>

Showing 3 changed files with 143 additions and 0 deletions.

src/rookify/modules/migrate_osd_pools/__init__.py (new file, 3 additions)

# -*- coding: utf-8 -*-

from .main import MigrateOSDPoolsHandler as ModuleHandler  # noqa
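The single re-export gives the module package a uniform entry point name. Below is a minimal sketch of how such a convention can be consumed; this is an assumption about the surrounding loader, not code from this commit, and load_module_handler is a hypothetical helper that assumes the rookify package is importable.

import importlib

def load_module_handler(module_name: str):
    # e.g. module_name = "migrate_osd_pools"; the package re-exports its
    # handler class under the uniform name "ModuleHandler" (hypothetical loader).
    package = importlib.import_module("rookify.modules.{0}".format(module_name))
    return package.ModuleHandler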
src/rookify/modules/migrate_osd_pools/main.py (new file, 73 additions)

# -*- coding: utf-8 -*-

from typing import Any, Dict
from ..machine import Machine
from ..module import ModuleHandler


class MigrateOSDPoolsHandler(ModuleHandler):
    REQUIRES = ["analyze_ceph", "migrate_mds_pools", "migrate_rgw_pools"]

    def execute(self) -> None:
        state_data = self.machine.get_preflight_state("AnalyzeCephHandler").data

        # Pools that the MDS and RGW migrations have already taken care of
        migrated_mds_pools = getattr(
            self.machine.get_execution_state("MigrateMdsPoolsHandler"),
            "migrated_pools",
            [],
        )

        migrated_rgw_pools = getattr(
            self.machine.get_execution_state("MigrateRgwPoolsHandler"),
            "migrated_pools",
            [],
        )

        migrated_pools = migrated_mds_pools + migrated_rgw_pools

        osd_pool_configurations = self.ceph.get_osd_pool_configurations_from_osd_dump(
            state_data["osd"]["dump"]
        )

        pools = []

        for pool in osd_pool_configurations.values():
            if pool["pool_name"] not in migrated_pools:
                pools.append(pool)

        for pool in pools:
            self._migrate_pool(pool)

    def _migrate_pool(self, pool: Dict[str, Any]) -> None:
        migrated_pools = getattr(
            self.machine.get_execution_state("MigrateOSDPoolsHandler"),
            "migrated_pools",
            [],
        )

        if pool["pool_name"] in migrated_pools:
            return

        self.logger.debug("Migrating Ceph OSD pool '{0}'".format(pool["pool_name"]))

        pool_definition_values = {
            "cluster_namespace": self._config["rook"]["cluster"]["namespace"],
            "name": pool["pool_name"],
            "size": pool["size"],
        }

        # Render the CephBlockPool definition from the Jinja2 template
        pool_definition = self.load_template("pool.yaml.j2", **pool_definition_values)

        self.k8s.crd_api_apply(pool_definition.yaml)
        migrated_pools.append(pool["pool_name"])

        self.logger.info("Migrated Ceph OSD pool '{0}'".format(pool["pool_name"]))

    @staticmethod
    def register_execution_state(
        machine: Machine, state_name: str, handler: ModuleHandler, **kwargs: Any
    ) -> None:
        ModuleHandler.register_execution_state(
            machine, state_name, handler, tags=["migrated_pools"]
        )
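
The selection logic in execute() boils down to a set difference over pool names: every pool found in the OSD dump that the MDS and RGW migrations have not already claimed is migrated here. The following standalone sketch illustrates that step using plain dicts in place of the machine state; the pool fields mirror the handler's use of "pool_name" and "size", and the sample pool names are made up.

from typing import Any, Dict, List

def select_pools_to_migrate(
    osd_pool_configurations: Dict[str, Dict[str, Any]],
    migrated_mds_pools: List[str],
    migrated_rgw_pools: List[str],
) -> List[Dict[str, Any]]:
    # Pools already handled by the MDS or RGW pool migrations are skipped.
    already_migrated = migrated_mds_pools + migrated_rgw_pools
    return [
        pool
        for pool in osd_pool_configurations.values()
        if pool["pool_name"] not in already_migrated
    ]

# Example: only "my-rbd-pool" is left for the OSD pool migration,
# because "cephfs-metadata" was already handled by the MDS migration.
pools = select_pools_to_migrate(
    {
        "my-rbd-pool": {"pool_name": "my-rbd-pool", "size": 3},
        "cephfs-metadata": {"pool_name": "cephfs-metadata", "size": 3},
    },
    migrated_mds_pools=["cephfs-metadata"],
    migrated_rgw_pools=[],
)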
src/rookify/modules/migrate_osd_pools/templates/pool.yaml.j2 (new file, 67 additions)

---
#################################################################################################################
# Create a Ceph pool with settings for replication in production environments. A minimum of 3 OSDs on
# different hosts is required in this example.
# kubectl create -f pool.yaml
#################################################################################################################

apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  name: {{ name }}
  namespace: {{ cluster_namespace }}
spec:
  # The failure domain will spread the replicas of the data across different failure zones
  failureDomain: host
  # For a pool based on raw copies, specify the number of copies. A size of 1 indicates no redundancy.
  replicated:
    size: {{ size }}
    # Disallow setting pool with replica 1, this could lead to data loss without recovery.
    # Make sure you're *ABSOLUTELY CERTAIN* that is what you want
    requireSafeReplicaSize: true
    # hybridStorage:
    #   primaryDeviceClass: ssd
    #   secondaryDeviceClass: hdd
    # The number for replicas per failure domain, the value must be a divisor of the replica count. If specified, the most common value is 2 for stretch clusters, where the replica count would be 4.
    # replicasPerFailureDomain: 2
    # The name of the failure domain to place further down replicas
    # subFailureDomain: host
  # Ceph CRUSH root location of the rule
  # For reference: https://docs.ceph.com/docs/master/rados/operations/crush-map/#types-and-buckets
  #crushRoot: my-root
  # The Ceph CRUSH device class associated with the CRUSH replicated rule
  # For reference: https://docs.ceph.com/docs/master/rados/operations/crush-map/#device-classes
  # If device classes are specified, ensure this property is added to every pool in the cluster,
  # otherwise Ceph will warn about pools with overlapping roots.
  #deviceClass: my-class
  # Enables collecting RBD per-image IO statistics by enabling dynamic OSD performance counters. Defaults to false.
  # For reference: https://docs.ceph.com/docs/master/mgr/prometheus/#rbd-io-statistics
  # enableRBDStats: true
  # Set any property on a given pool
  # see https://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values
  parameters:
    # Inline compression mode for the data pool
    # Further reference: https://docs.ceph.com/docs/master/rados/configuration/bluestore-config-ref/#inline-compression
    compression_mode: none
    # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
    # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
    #target_size_ratio: ".5"
  mirroring:
    enabled: false
    # mirroring mode: pool level or per image
    # for more details see: https://docs.ceph.com/docs/master/rbd/rbd-mirroring/#enable-mirroring
    mode: image
    # specify the schedule(s) on which snapshots should be taken
    # snapshotSchedules:
    #   - interval: 24h # daily snapshots
    #     startTime: 14:00:00-05:00
  # reports pool mirroring status if enabled
  statusCheck:
    mirror:
      disabled: false
      interval: 60s
  # quota in bytes and/or objects, default value is 0 (unlimited)
  # see https://docs.ceph.com/en/latest/rados/operations/pools/#set-pool-quotas
  # quotas:
  #   maxSize: "10Gi" # valid suffixes include k, M, G, T, P, E, Ki, Mi, Gi, Ti, Pi, Ei
  #   maxObjects: 1000000000 # 1 billion objects
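
To see what the handler ends up applying, the template can be rendered by hand with Jinja2 using the same three values that _migrate_pool passes in (name, cluster_namespace, size). This is a minimal sketch, not part of the commit; it assumes a local checkout of the repository so the template path above exists, and the sample values are illustrative only.

# Hedged sketch: render pool.yaml.j2 outside of rookify for inspection.
from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader("src/rookify/modules/migrate_osd_pools/templates"))
template = env.get_template("pool.yaml.j2")

# Illustrative values; the handler derives them from the Ceph OSD dump and the rookify config.
manifest = template.render(
    name="my-rbd-pool",
    cluster_namespace="rook-ceph",
    size=3,
)
print(manifest)  # a CephBlockPool manifest as it would be applied via the CRD API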