From d41ae5792d524a2b7956c30cb52f8f83bdf53e67 Mon Sep 17 00:00:00 2001 From: "Andrew J. Stone" Date: Mon, 14 Oct 2024 19:08:18 +0000 Subject: [PATCH] wip --- nexus/db-model/src/clickhouse_policy.rs | 22 ++++++- nexus/db-model/src/lib.rs | 1 + nexus/db-model/src/schema.rs | 5 +- .../src/db/datastore/clickhouse_policy.rs | 47 ++++++++++++++ nexus/db-queries/src/db/datastore/mod.rs | 1 + nexus/reconfigurator/planning/src/planner.rs | 61 +++++++++---------- nexus/types/src/deployment/planning_input.rs | 52 ++++++++++------ schema/crdb/clickhouse-policy/up.sql | 8 --- schema/crdb/clickhouse-policy/up1.sql | 5 ++ schema/crdb/clickhouse-policy/up2.sql | 7 +++ schema/crdb/dbinit.sql | 29 +++++++-- 11 files changed, 170 insertions(+), 68 deletions(-) create mode 100644 nexus/db-queries/src/db/datastore/clickhouse_policy.rs delete mode 100644 schema/crdb/clickhouse-policy/up.sql create mode 100644 schema/crdb/clickhouse-policy/up1.sql create mode 100644 schema/crdb/clickhouse-policy/up2.sql diff --git a/nexus/db-model/src/clickhouse_policy.rs b/nexus/db-model/src/clickhouse_policy.rs index bbcd8b04ad..68a5b59306 100644 --- a/nexus/db-model/src/clickhouse_policy.rs +++ b/nexus/db-model/src/clickhouse_policy.rs @@ -2,16 +2,34 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. +//! 
Database representation of a clickhouse deployment policy + +use super::impl_enum_type; use crate::SqlU32; use crate::{schema::clickhouse_policy, SqlU8}; use chrono::{DateTime, Utc}; +use serde::{Deserialize, Serialize}; + +impl_enum_type!( + #[derive(Clone, SqlType, Debug, QueryId)] + #[diesel(postgres_type(name = "clickhouse_mode", schema = "public"))] + pub struct ClickhouseModeEnum; + + #[derive(Clone, Copy, Debug, AsExpression, FromSqlRow, Serialize, Deserialize, PartialEq)] + #[diesel(sql_type = ClickhouseModeEnum)] + pub enum DbClickhouseMode; + + // Enum values + SingleNodeOnly => b"single_node_only" + ClusterOnly => b"cluster_only" + Both => b"both" +); #[derive(Queryable, Clone, Debug, Selectable, Insertable)] #[diesel(table_name = clickhouse_policy)] pub struct ClickhousePolicy { pub version: SqlU32, - pub clickhouse_cluster_enabled: bool, - pub clickhouse_single_node_enabled: bool, + pub clickhouse_mode: DbClickhouseMode, pub clickhouse_cluster_target_servers: SqlU8, pub clickhouse_cluster_target_keepers: SqlU8, pub time_created: DateTime<Utc>, diff --git a/nexus/db-model/src/lib.rs b/nexus/db-model/src/lib.rs index 6b9bb9934e..9137b0b3c3 100644 --- a/nexus/db-model/src/lib.rs +++ b/nexus/db-model/src/lib.rs @@ -134,6 +134,7 @@ pub use block_size::*; pub use bootstore::*; pub use bytecount::*; pub use certificate::*; +pub use clickhouse_policy::*; pub use cockroachdb_node_id::*; pub use collection::*; pub use console_session::*; diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 7e1098753d..e0942d6b3b 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -844,11 +844,10 @@ table! { table!
{ clickhouse_policy (version) { version -> Int8, - clickhouse_cluster_enabled -> Bool, - clickhouse_single_node_enabled -> Bool, + clickhouse_mode -> crate::clickhouse_policy::ClickhouseModeEnum, clickhouse_cluster_target_servers -> Int2, clickhouse_cluster_target_keepers -> Int2, - time_created -> Timestamptz + time_created -> Timestamptz, } } diff --git a/nexus/db-queries/src/db/datastore/clickhouse_policy.rs b/nexus/db-queries/src/db/datastore/clickhouse_policy.rs new file mode 100644 index 0000000000..da772da8a9 --- /dev/null +++ b/nexus/db-queries/src/db/datastore/clickhouse_policy.rs @@ -0,0 +1,47 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Queries related to clickhouse policy + +use super::DataStore; +use crate::authz; +use crate::context::OpContext; +use crate::db; +use crate::db::error::public_error_from_diesel; +use crate::db::error::ErrorHandler; +use crate::db::pagination::paginated; +use async_bb8_diesel::AsyncRunQueryDsl; +use diesel::expression::SelectableHelper; +use diesel::QueryDsl; +use nexus_db_model::ClickhousePolicy as DbClickhousePolicy; +use nexus_db_model::SqlU32; +use nexus_types::deployment::ClickhousePolicy; +use omicron_common::api::external::DataPageParams; +use omicron_common::api::external::ListResultVec; + +impl DataStore { + pub async fn clickhouse_policy_list( + &self, + opctx: &OpContext, + pagparams: &DataPageParams<'_, SqlU32>, + ) -> ListResultVec<ClickhousePolicy> { + use db::schema::clickhouse_policy; + + opctx + .authorize(authz::Action::ListChildren, &authz::BLUEPRINT_CONFIG) + .await?; + + let policies = paginated( + clickhouse_policy::table, + clickhouse_policy::version, + pagparams, + ) + .select(DbClickhousePolicy::as_select()) + .get_results_async(&*self.pool_connection_authorized(opctx).await?)
+ .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + Ok(policies.into_iter().map(|p| p.into_clickhouse_policy()).collect()) + } +} diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs index 258e43f18c..e724d3d9af 100644 --- a/nexus/db-queries/src/db/datastore/mod.rs +++ b/nexus/db-queries/src/db/datastore/mod.rs @@ -55,6 +55,7 @@ mod bfd; mod bgp; mod bootstore; mod certificate; +mod clickhouse_policy; mod cockroachdb_node_id; mod cockroachdb_settings; mod console_session; diff --git a/nexus/reconfigurator/planning/src/planner.rs b/nexus/reconfigurator/planning/src/planner.rs index 145be867c6..a8db86cdc6 100644 --- a/nexus/reconfigurator/planning/src/planner.rs +++ b/nexus/reconfigurator/planning/src/planner.rs @@ -2622,11 +2622,8 @@ mod test { // Enable clickhouse clusters via policy let mut input_builder = input.into_builder(); - input_builder.policy_mut().clickhouse_policy = Some(ClickhousePolicy { - deploy_with_standalone: true, - target_servers, - target_keepers, - }); + input_builder.policy_mut().clickhouse_policy = + Some(ClickhousePolicy::Both { target_servers, target_keepers }); // Creating a new blueprint should deploy all the new clickhouse zones let input = input_builder.build(); @@ -2659,8 +2656,8 @@ mod test { .map(|z| z.id) .collect(); - assert_eq!(keeper_zone_ids.len(), target_keepers); - assert_eq!(server_zone_ids.len(), target_servers); + assert_eq!(keeper_zone_ids.len(), target_keepers as usize); + assert_eq!(server_zone_ids.len(), target_servers as usize); // We should be attempting to allocate all servers and keepers since // this the initial configuration @@ -2676,8 +2673,14 @@ mod test { clickhouse_cluster_config.max_used_server_id, (target_servers as u64).into() ); - assert_eq!(clickhouse_cluster_config.keepers.len(), target_keepers); - assert_eq!(clickhouse_cluster_config.servers.len(), target_servers); + assert_eq!( + clickhouse_cluster_config.keepers.len(),
target_keepers as usize + ); + assert_eq!( + clickhouse_cluster_config.servers.len(), + target_servers as usize + ); // Ensure that the added keepers are in server zones for zone_id in clickhouse_cluster_config.keepers.keys() { @@ -2769,11 +2772,8 @@ mod test { // Enable clickhouse clusters via policy let target_keepers = 5; let mut input_builder = input.into_builder(); - input_builder.policy_mut().clickhouse_policy = Some(ClickhousePolicy { - deploy_with_standalone: true, - target_servers, - target_keepers, - }); + input_builder.policy_mut().clickhouse_policy = + Some(ClickhousePolicy::Both { target_servers, target_keepers }); let input = input_builder.build(); let blueprint5 = Planner::new_based_on( log.clone(), @@ -2799,7 +2799,7 @@ mod test { .collect(); // We should have allocated 2 new keeper zones - assert_eq!(new_keeper_zone_ids.len(), target_keepers); + assert_eq!(new_keeper_zone_ids.len(), target_keepers as usize); assert!(keeper_zone_ids.is_subset(&new_keeper_zone_ids)); // We should be trying to provision one new keeper for a keeper zone @@ -2869,7 +2869,7 @@ mod test { bp7_config.keepers.len(), bp7_config.max_used_keeper_id.0 as usize ); - assert_eq!(bp7_config.keepers.len(), target_keepers); + assert_eq!(bp7_config.keepers.len(), target_keepers as usize); assert_eq!( bp7_config.highest_seen_keeper_leader_committed_log_index, 2 @@ -2909,7 +2909,7 @@ mod test { bp7_config.max_used_keeper_id ); assert_eq!(bp8_config.keepers, bp7_config.keepers); - assert_eq!(bp7_config.keepers.len(), target_keepers); + assert_eq!(bp7_config.keepers.len(), target_keepers as usize); assert_eq!( bp8_config.highest_seen_keeper_leader_committed_log_index, 3 @@ -2935,11 +2935,8 @@ mod test { // Enable clickhouse clusters via policy let mut input_builder = input.into_builder(); - input_builder.policy_mut().clickhouse_policy = Some(ClickhousePolicy { - deploy_with_standalone: true, - target_servers, - target_keepers, - }); + input_builder.policy_mut().clickhouse_policy = + 
Some(ClickhousePolicy::Both { target_servers, target_keepers }); let input = input_builder.build(); // Create a new blueprint to deploy all our clickhouse zones @@ -2972,8 +2969,8 @@ mod test { .map(|z| z.id) .collect(); - assert_eq!(keeper_zone_ids.len(), target_keepers); - assert_eq!(server_zone_ids.len(), target_servers); + assert_eq!(keeper_zone_ids.len(), target_keepers as usize); + assert_eq!(server_zone_ids.len(), target_servers as usize); // Directly manipulate the blueprint and inventory so that the // clickhouse clusters are stable @@ -3118,7 +3115,7 @@ mod test { // We've only added one keeper from our desired state // This brings us back up to our target count assert_eq!(config.keepers.len(), old_config.keepers.len() + 1); - assert_eq!(config.keepers.len(), target_keepers); + assert_eq!(config.keepers.len(), target_keepers as usize); // We've allocated one new keeper assert_eq!( config.max_used_keeper_id, @@ -3142,11 +3139,8 @@ mod test { // Enable clickhouse clusters via policy let mut input_builder = input.into_builder(); - input_builder.policy_mut().clickhouse_policy = Some(ClickhousePolicy { - deploy_with_standalone: true, - target_servers, - target_keepers, - }); + input_builder.policy_mut().clickhouse_policy = + Some(ClickhousePolicy::Both { target_servers, target_keepers }); let input = input_builder.build(); // Create a new blueprint to deploy all our clickhouse zones @@ -3179,12 +3173,13 @@ mod test { .map(|z| z.id) .collect(); - assert_eq!(keeper_zone_ids.len(), target_keepers); - assert_eq!(server_zone_ids.len(), target_servers); + assert_eq!(keeper_zone_ids.len(), target_keepers as usize); + assert_eq!(server_zone_ids.len(), target_servers as usize); // Disable clickhouse clusters via policy let mut input_builder = input.into_builder(); - input_builder.policy_mut().clickhouse_policy = None; + input_builder.policy_mut().clickhouse_policy = + Some(ClickhousePolicy::SingleNodeOnly); let input = input_builder.build(); // Create a new blueprint 
with the disabled policy diff --git a/nexus/types/src/deployment/planning_input.rs b/nexus/types/src/deployment/planning_input.rs index 5541df60e6..cffb44dd75 100644 --- a/nexus/types/src/deployment/planning_input.rs +++ b/nexus/types/src/deployment/planning_input.rs @@ -124,10 +124,14 @@ impl PlanningInput { pub fn target_clickhouse_zone_count(&self) -> usize { if let Some(policy) = &self.policy.clickhouse_policy { - if policy.deploy_with_standalone { - SINGLE_NODE_CLICKHOUSE_REDUNDANCY - } else { - 0 + match policy { + ClickhousePolicy::SingleNodeOnly => { + SINGLE_NODE_CLICKHOUSE_REDUNDANCY + } + ClickhousePolicy::ClusterOnly { .. } => 0, + ClickhousePolicy::Both { .. } => { + SINGLE_NODE_CLICKHOUSE_REDUNDANCY + } } } else { SINGLE_NODE_CLICKHOUSE_REDUNDANCY @@ -138,7 +142,7 @@ impl PlanningInput { self.policy .clickhouse_policy .as_ref() - .map(|policy| policy.target_servers) + .map(|policy| policy.target_servers() as usize) .unwrap_or(0) } @@ -146,7 +150,7 @@ impl PlanningInput { self.policy .clickhouse_policy .as_ref() - .map(|policy| policy.target_keepers) + .map(|policy| policy.target_keepers() as usize) .unwrap_or(0) } @@ -876,20 +880,34 @@ pub struct Policy { pub clickhouse_policy: Option<ClickhousePolicy>, } -/// Policy for replicated clickhouse setups +/// How to deploy clickhouse nodes #[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ClickhousePolicy { - /// Should we run the single-node cluster alongside the replicated cluster? - /// This is stage 1 of our deployment plan as laid out in RFD 468 - /// - /// If this is set to false, then we will only deploy replicated clusters.
- pub deploy_with_standalone: bool, +pub enum ClickhousePolicy { + SingleNodeOnly, + ClusterOnly { target_servers: u8, target_keepers: u8 }, + Both { target_servers: u8, target_keepers: u8 }, +} - /// Desired number of clickhouse servers - pub target_servers: usize, +impl ClickhousePolicy { + fn target_servers(&self) -> u8 { + match self { + ClickhousePolicy::SingleNodeOnly => 0, + ClickhousePolicy::ClusterOnly { target_servers, .. } => { + *target_servers + } + ClickhousePolicy::Both { target_servers, .. } => *target_servers, + } + } - /// Desired number of clickhouse keepers - pub target_keepers: usize, + fn target_keepers(&self) -> u8 { + match self { + ClickhousePolicy::SingleNodeOnly => 0, + ClickhousePolicy::ClusterOnly { target_keepers, .. } => { + *target_keepers + } + ClickhousePolicy::Both { target_keepers, .. } => *target_keepers, + } + } } #[derive(Debug, Clone, Serialize, Deserialize)] diff --git a/schema/crdb/clickhouse-policy/up.sql b/schema/crdb/clickhouse-policy/up.sql deleted file mode 100644 index 07eeca8083..0000000000 --- a/schema/crdb/clickhouse-policy/up.sql +++ /dev/null @@ -1,8 +0,0 @@ -CREATE TABLE IF NOT EXISTS omicron.public.clickhouse_policy ( - version INT8 PRIMARY KEY, - clickhouse_cluster_enabled BOOL NOT NULL - clickhouse_single_node_enabled BOOL NOT NULL - clickhouse_cluster_target_servers INT2 NOT NULL - clickhouse_cluster_target_keepers INT2 NOT NULL - time_created TIMESTAMPTZ NOT NULL, -); diff --git a/schema/crdb/clickhouse-policy/up1.sql b/schema/crdb/clickhouse-policy/up1.sql new file mode 100644 index 0000000000..506e329b21 --- /dev/null +++ b/schema/crdb/clickhouse-policy/up1.sql @@ -0,0 +1,5 @@ +CREATE TYPE IF NOT EXISTS omicron.public.clickhouse_mode AS ENUM ( + 'single_node_only', + 'cluster_only', + 'both' +); diff --git a/schema/crdb/clickhouse-policy/up2.sql b/schema/crdb/clickhouse-policy/up2.sql new file mode 100644 index 0000000000..52c950a570 --- /dev/null +++ b/schema/crdb/clickhouse-policy/up2.sql @@ -0,0 +1,7 
@@ +CREATE TABLE IF NOT EXISTS omicron.public.clickhouse_policy ( + version INT8 PRIMARY KEY, + clickhouse_mode omicron.public.clickhouse_mode NOT NULL, + clickhouse_cluster_target_servers INT2 NOT NULL, + clickhouse_cluster_target_keepers INT2 NOT NULL, + time_created TIMESTAMPTZ NOT NULL +); diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index ee92aca873..3a2f2a3a81 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -42,6 +42,22 @@ ALTER DEFAULT PRIVILEGES GRANT INSERT, SELECT, UPDATE, DELETE ON TABLES to omicr */ ALTER RANGE default CONFIGURE ZONE USING num_replicas = 5; + +/* + * The deployment strategy for clickhouse + */ +CREATE TYPE IF NOT EXISTS omicron.public.clickhouse_mode AS ENUM ( + -- Only deploy a single node clickhouse + 'single_node_only', + + -- Only deploy a clickhouse cluster without any single node deployments + 'cluster_only', + + -- Deploy both a single node and cluster deployment. + -- This is the strategy for stage 1 described in RFD 468 + 'both' +); + /* * A planning policy for clickhouse for a single multirack setup * @@ -56,11 +72,14 @@ CREATE TABLE IF NOT EXISTS omicron.public.clickhouse_policy ( -- multirack to associate with some sort of rack group ID. version INT8 PRIMARY KEY, - clickhouse_cluster_enabled BOOL NOT NULL - clickhouse_single_node_enabled BOOL NOT NULL - clickhouse_cluster_target_servers INT2 NOT NULL - clickhouse_cluster_target_keepers INT2 NOT NULL - time_created TIMESTAMPTZ NOT NULL, + clickhouse_mode omicron.public.clickhouse_mode NOT NULL, + + -- Only greater than 0 when clickhouse cluster is enabled + clickhouse_cluster_target_servers INT2 NOT NULL, + -- Only greater than 0 when clickhouse cluster is enabled + clickhouse_cluster_target_keepers INT2 NOT NULL, + + time_created TIMESTAMPTZ NOT NULL ); /*