Commit

wip
andrewjstone committed Oct 14, 2024
1 parent 033b118 commit d41ae57
Showing 11 changed files with 170 additions and 68 deletions.
22 changes: 20 additions & 2 deletions nexus/db-model/src/clickhouse_policy.rs
@@ -2,16 +2,34 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.

//! Database representation of a clickhouse deployment policy

use super::impl_enum_type;
use crate::SqlU32;
use crate::{schema::clickhouse_policy, SqlU8};
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};

impl_enum_type!(
#[derive(Clone, SqlType, Debug, QueryId)]
#[diesel(postgres_type(name = "clickhouse_mode", schema = "public"))]
pub struct ClickhouseModeEnum;

#[derive(Clone, Copy, Debug, AsExpression, FromSqlRow, Serialize, Deserialize, PartialEq)]
#[diesel(sql_type = ClickhouseModeEnum)]
pub enum DbClickhouseMode;

// Enum values
SingleNodeOnly => b"single_node_only"
ClusterOnly => b"cluster_only"
Both => b"both"
);

#[derive(Queryable, Clone, Debug, Selectable, Insertable)]
#[diesel(table_name = clickhouse_policy)]
pub struct ClickhousePolicy {
pub version: SqlU32,
pub clickhouse_cluster_enabled: bool,
pub clickhouse_single_node_enabled: bool,
pub clickhouse_mode: DbClickhouseMode,
pub clickhouse_cluster_target_servers: SqlU8,
pub clickhouse_cluster_target_keepers: SqlU8,
pub time_created: DateTime<Utc>,
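
The row type above flattens the policy into a mode column plus two target counts. The conversion to and from the planning-level enum is outside the visible hunk; the following is a minimal, self-contained sketch of what that mapping could look like, using stand-in types and names rather than the actual code in this commit.

```rust
// Hypothetical sketch: flattening the planning-level policy enum into a
// mode + target-count row, mirroring the clickhouse_policy table shape.
// Stand-in types; the real code lives in nexus/db-model and nexus/types.

#[derive(Debug, Clone, Copy, PartialEq)]
enum ClickhouseMode {
    SingleNodeOnly,
    ClusterOnly,
    Both,
}

#[derive(Debug, Clone)]
enum ClickhousePolicy {
    SingleNodeOnly,
    ClusterOnly { target_servers: u8, target_keepers: u8 },
    Both { target_servers: u8, target_keepers: u8 },
}

#[derive(Debug, Clone)]
struct ClickhousePolicyRow {
    version: u32,
    mode: ClickhouseMode,
    target_servers: u8,
    target_keepers: u8,
}

fn to_row(version: u32, policy: &ClickhousePolicy) -> ClickhousePolicyRow {
    // Variants without cluster targets store zero counts in the row.
    let (mode, target_servers, target_keepers) = match policy {
        ClickhousePolicy::SingleNodeOnly => (ClickhouseMode::SingleNodeOnly, 0, 0),
        ClickhousePolicy::ClusterOnly { target_servers, target_keepers } => {
            (ClickhouseMode::ClusterOnly, *target_servers, *target_keepers)
        }
        ClickhousePolicy::Both { target_servers, target_keepers } => {
            (ClickhouseMode::Both, *target_servers, *target_keepers)
        }
    };
    ClickhousePolicyRow { version, mode, target_servers, target_keepers }
}

fn main() {
    let row = to_row(1, &ClickhousePolicy::Both { target_servers: 3, target_keepers: 5 });
    assert_eq!(row.mode, ClickhouseMode::Both);
    assert_eq!((row.target_servers, row.target_keepers), (3, 5));
    println!("{row:?}");
}
```
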
1 change: 1 addition & 0 deletions nexus/db-model/src/lib.rs
@@ -134,6 +134,7 @@ pub use block_size::*;
pub use bootstore::*;
pub use bytecount::*;
pub use certificate::*;
pub use clickhouse_policy::*;
pub use cockroachdb_node_id::*;
pub use collection::*;
pub use console_session::*;
5 changes: 2 additions & 3 deletions nexus/db-model/src/schema.rs
@@ -844,11 +844,10 @@ table! {
table! {
clickhouse_policy (version) {
version -> Int8,
clickhouse_cluster_enabled -> Bool,
clickhouse_single_node_enabled -> Bool,
clickhouse_mode -> crate::clickhouse_policy::ClickhouseModeEnum,
clickhouse_cluster_target_servers -> Int2,
clickhouse_cluster_target_keepers -> Int2,
time_created -> Timestamptz
time_created -> Timestamptz,
}
}

47 changes: 47 additions & 0 deletions nexus/db-queries/src/db/datastore/clickhouse_policy.rs
@@ -0,0 +1,47 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.

//! Queries related to clickhouse policy

use super::DataStore;
use crate::authz;
use crate::context::OpContext;
use crate::db;
use crate::db::error::public_error_from_diesel;
use crate::db::error::ErrorHandler;
use crate::db::pagination::paginated;
use async_bb8_diesel::AsyncRunQueryDsl;
use diesel::expression::SelectableHelper;
use diesel::QueryDsl;
use nexus_db_model::ClickhousePolicy as DbClickhousePolicy;
use nexus_db_model::SqlU32;
use nexus_types::deployment::ClickhousePolicy;
use omicron_common::api::external::DataPageParams;
use omicron_common::api::external::ListResultVec;

impl DataStore {
pub async fn clickhouse_policy_list(
&self,
opctx: &OpContext,
pagparams: &DataPageParams<'_, SqlU32>,
) -> ListResultVec<Option<ClickhousePolicy>> {
use db::schema::clickhouse_policy;

opctx
.authorize(authz::Action::ListChildren, &authz::BLUEPRINT_CONFIG)
.await?;

let policies = paginated(
clickhouse_policy::table,
clickhouse_policy::version,
pagparams,
)
.select(DbClickhousePolicy::as_select())
.get_results_async(&*self.pool_connection_authorized(opctx).await?)
.await
.map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?;

Ok(policies.into_iter().map(|p| p.into_clickhouse_policy()).collect())
}
}
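
The query above pages by `version` using `DataPageParams`. As context for how a caller typically consumes such a paginated list, here is a minimal, self-contained sketch of the fetch-until-short-page pattern; the types and `list_page` helper are stand-ins, not the actual omicron API.

```rust
// Hypothetical sketch of paging through versioned policy records the way a
// caller of clickhouse_policy_list might: request fixed-size pages keyed by
// version until a short page signals the end.

#[derive(Debug, Clone)]
struct PolicyRecord {
    version: u32,
}

/// Stand-in for a paginated query: returns up to `limit` records whose
/// version is greater than the marker.
fn list_page(all: &[PolicyRecord], marker: Option<u32>, limit: usize) -> Vec<PolicyRecord> {
    all.iter()
        .filter(|p| marker.map_or(true, |m| p.version > m))
        .take(limit)
        .cloned()
        .collect()
}

fn main() {
    let all: Vec<PolicyRecord> =
        (1..=7).map(|version| PolicyRecord { version }).collect();

    let limit = 3;
    let mut marker = None;
    let mut seen = Vec::new();
    loop {
        let page = list_page(&all, marker, limit);
        // A page shorter than the limit means there is nothing left to fetch.
        let short = page.len() < limit;
        marker = page.last().map(|p| p.version);
        seen.extend(page);
        if short {
            break;
        }
    }
    assert_eq!(seen.len(), 7);
    println!("fetched {} policies across pages", seen.len());
}
```
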
1 change: 1 addition & 0 deletions nexus/db-queries/src/db/datastore/mod.rs
@@ -55,6 +55,7 @@ mod bfd;
mod bgp;
mod bootstore;
mod certificate;
mod clickhouse_policy;
mod cockroachdb_node_id;
mod cockroachdb_settings;
mod console_session;
61 changes: 28 additions & 33 deletions nexus/reconfigurator/planning/src/planner.rs
@@ -2622,11 +2622,8 @@ mod test {

// Enable clickhouse clusters via policy
let mut input_builder = input.into_builder();
input_builder.policy_mut().clickhouse_policy = Some(ClickhousePolicy {
deploy_with_standalone: true,
target_servers,
target_keepers,
});
input_builder.policy_mut().clickhouse_policy =
Some(ClickhousePolicy::Both { target_servers, target_keepers });

// Creating a new blueprint should deploy all the new clickhouse zones
let input = input_builder.build();
@@ -2659,8 +2656,8 @@
.map(|z| z.id)
.collect();

assert_eq!(keeper_zone_ids.len(), target_keepers);
assert_eq!(server_zone_ids.len(), target_servers);
assert_eq!(keeper_zone_ids.len(), target_keepers as usize);
assert_eq!(server_zone_ids.len(), target_servers as usize);

// We should be attempting to allocate all servers and keepers since
// this is the initial configuration
@@ -2676,8 +2673,14 @@
clickhouse_cluster_config.max_used_server_id,
(target_servers as u64).into()
);
assert_eq!(clickhouse_cluster_config.keepers.len(), target_keepers);
assert_eq!(clickhouse_cluster_config.servers.len(), target_servers);
assert_eq!(
clickhouse_cluster_config.keepers.len(),
target_keepers as usize
);
assert_eq!(
clickhouse_cluster_config.servers.len(),
target_servers as usize
);

// Ensure that the added keepers are in server zones
for zone_id in clickhouse_cluster_config.keepers.keys() {
@@ -2769,11 +2772,8 @@
// Enable clickhouse clusters via policy
let target_keepers = 5;
let mut input_builder = input.into_builder();
input_builder.policy_mut().clickhouse_policy = Some(ClickhousePolicy {
deploy_with_standalone: true,
target_servers,
target_keepers,
});
input_builder.policy_mut().clickhouse_policy =
Some(ClickhousePolicy::Both { target_servers, target_keepers });
let input = input_builder.build();
let blueprint5 = Planner::new_based_on(
log.clone(),
@@ -2799,7 +2799,7 @@
.collect();

// We should have allocated 2 new keeper zones
assert_eq!(new_keeper_zone_ids.len(), target_keepers);
assert_eq!(new_keeper_zone_ids.len(), target_keepers as usize);
assert!(keeper_zone_ids.is_subset(&new_keeper_zone_ids));

// We should be trying to provision one new keeper for a keeper zone
@@ -2869,7 +2869,7 @@
bp7_config.keepers.len(),
bp7_config.max_used_keeper_id.0 as usize
);
assert_eq!(bp7_config.keepers.len(), target_keepers);
assert_eq!(bp7_config.keepers.len(), target_keepers as usize);
assert_eq!(
bp7_config.highest_seen_keeper_leader_committed_log_index,
2
@@ -2909,7 +2909,7 @@
bp7_config.max_used_keeper_id
);
assert_eq!(bp8_config.keepers, bp7_config.keepers);
assert_eq!(bp7_config.keepers.len(), target_keepers);
assert_eq!(bp7_config.keepers.len(), target_keepers as usize);
assert_eq!(
bp8_config.highest_seen_keeper_leader_committed_log_index,
3
@@ -2935,11 +2935,8 @@

// Enable clickhouse clusters via policy
let mut input_builder = input.into_builder();
input_builder.policy_mut().clickhouse_policy = Some(ClickhousePolicy {
deploy_with_standalone: true,
target_servers,
target_keepers,
});
input_builder.policy_mut().clickhouse_policy =
Some(ClickhousePolicy::Both { target_servers, target_keepers });
let input = input_builder.build();

// Create a new blueprint to deploy all our clickhouse zones
@@ -2972,8 +2969,8 @@
.map(|z| z.id)
.collect();

assert_eq!(keeper_zone_ids.len(), target_keepers);
assert_eq!(server_zone_ids.len(), target_servers);
assert_eq!(keeper_zone_ids.len(), target_keepers as usize);
assert_eq!(server_zone_ids.len(), target_servers as usize);

// Directly manipulate the blueprint and inventory so that the
// clickhouse clusters are stable
@@ -3118,7 +3115,7 @@
// We've only added one keeper from our desired state
// This brings us back up to our target count
assert_eq!(config.keepers.len(), old_config.keepers.len() + 1);
assert_eq!(config.keepers.len(), target_keepers);
assert_eq!(config.keepers.len(), target_keepers as usize);
// We've allocated one new keeper
assert_eq!(
config.max_used_keeper_id,
@@ -3142,11 +3139,8 @@

// Enable clickhouse clusters via policy
let mut input_builder = input.into_builder();
input_builder.policy_mut().clickhouse_policy = Some(ClickhousePolicy {
deploy_with_standalone: true,
target_servers,
target_keepers,
});
input_builder.policy_mut().clickhouse_policy =
Some(ClickhousePolicy::Both { target_servers, target_keepers });
let input = input_builder.build();

// Create a new blueprint to deploy all our clickhouse zones
@@ -3179,12 +3173,13 @@
.map(|z| z.id)
.collect();

assert_eq!(keeper_zone_ids.len(), target_keepers);
assert_eq!(server_zone_ids.len(), target_servers);
assert_eq!(keeper_zone_ids.len(), target_keepers as usize);
assert_eq!(server_zone_ids.len(), target_servers as usize);

// Disable clickhouse clusters via policy
let mut input_builder = input.into_builder();
input_builder.policy_mut().clickhouse_policy = None;
input_builder.policy_mut().clickhouse_policy =
Some(ClickhousePolicy::SingleNodeOnly);
let input = input_builder.build();

// Create a new blueprint with the disabled policy
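
One mechanical change running through these tests: the policy's target counts are now `u8` (backed by `SqlU8` model fields), while collection lengths are `usize`, hence the `as usize` casts added to the assertions. A tiny self-contained illustration of that widening cast (the names here are placeholders):

```rust
// Comparing a u8 target count against a usize collection length requires a
// widening cast; usize::from is a lossless alternative to `as usize`.
fn main() {
    let target_keepers: u8 = 5;
    let keeper_zone_ids = vec!["k1", "k2", "k3", "k4", "k5"];
    assert_eq!(keeper_zone_ids.len(), target_keepers as usize);
    assert_eq!(keeper_zone_ids.len(), usize::from(target_keepers));
}
```
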
52 changes: 35 additions & 17 deletions nexus/types/src/deployment/planning_input.rs
@@ -124,10 +124,14 @@ impl PlanningInput {

pub fn target_clickhouse_zone_count(&self) -> usize {
if let Some(policy) = &self.policy.clickhouse_policy {
if policy.deploy_with_standalone {
SINGLE_NODE_CLICKHOUSE_REDUNDANCY
} else {
0
match policy {
ClickhousePolicy::SingleNodeOnly => {
SINGLE_NODE_CLICKHOUSE_REDUNDANCY
}
ClickhousePolicy::ClusterOnly { .. } => 0,
ClickhousePolicy::Both { .. } => {
SINGLE_NODE_CLICKHOUSE_REDUNDANCY
}
}
} else {
SINGLE_NODE_CLICKHOUSE_REDUNDANCY
@@ -138,15 +142,15 @@ impl PlanningInput {
self.policy
.clickhouse_policy
.as_ref()
.map(|policy| policy.target_servers)
.map(|policy| policy.target_servers() as usize)
.unwrap_or(0)
}

pub fn target_clickhouse_keeper_zone_count(&self) -> usize {
self.policy
.clickhouse_policy
.as_ref()
.map(|policy| policy.target_keepers)
.map(|policy| policy.target_keepers() as usize)
.unwrap_or(0)
}

@@ -876,20 +880,34 @@ pub struct Policy {
pub clickhouse_policy: Option<ClickhousePolicy>,
}

/// Policy for replicated clickhouse setups
/// How to deploy clickhouse nodes
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ClickhousePolicy {
/// Should we run the single-node cluster alongside the replicated cluster?
/// This is stage 1 of our deployment plan as laid out in RFD 468
///
/// If this is set to false, then we will only deploy replicated clusters.
pub deploy_with_standalone: bool,
pub enum ClickhousePolicy {
SingleNodeOnly,
ClusterOnly { target_servers: u8, target_keepers: u8 },
Both { target_servers: u8, target_keepers: u8 },
}

/// Desired number of clickhouse servers
pub target_servers: usize,
impl ClickhousePolicy {
fn target_servers(&self) -> u8 {
match self {
ClickhousePolicy::SingleNodeOnly => 0,
ClickhousePolicy::ClusterOnly { target_servers, .. } => {
*target_servers
}
ClickhousePolicy::Both { target_servers, .. } => *target_servers,
}
}

/// Desired number of clickhouse keepers
pub target_keepers: usize,
fn target_keepers(&self) -> u8 {
match self {
ClickhousePolicy::SingleNodeOnly => 0,
ClickhousePolicy::ClusterOnly { target_keepers, .. } => {
*target_keepers
}
ClickhousePolicy::Both { target_keepers, .. } => *target_keepers,
}
}
}

#[derive(Debug, Clone, Serialize, Deserialize)]
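
Putting the pieces of this file together: the zone counts the planner asks for fall out of the enum variant, with single-node redundancy kept for `SingleNodeOnly` and `Both` (and when no policy is set at all), and server/keeper targets taken from the variant fields. Below is a self-contained sketch of that arithmetic; the constant value and all names are stand-ins for illustration, not the omicron definitions.

```rust
// Hypothetical sketch of how planner-facing zone counts derive from the new
// policy enum. SINGLE_NODE_REDUNDANCY stands in for the real
// SINGLE_NODE_CLICKHOUSE_REDUNDANCY constant (value assumed here).

const SINGLE_NODE_REDUNDANCY: usize = 1;

#[derive(Debug, Clone)]
enum ClickhousePolicy {
    SingleNodeOnly,
    ClusterOnly { target_servers: u8, target_keepers: u8 },
    Both { target_servers: u8, target_keepers: u8 },
}

fn single_node_zone_count(policy: Option<&ClickhousePolicy>) -> usize {
    match policy {
        // No policy at all keeps the historical single-node deployment.
        None
        | Some(ClickhousePolicy::SingleNodeOnly)
        | Some(ClickhousePolicy::Both { .. }) => SINGLE_NODE_REDUNDANCY,
        Some(ClickhousePolicy::ClusterOnly { .. }) => 0,
    }
}

fn server_zone_count(policy: Option<&ClickhousePolicy>) -> usize {
    match policy {
        Some(ClickhousePolicy::ClusterOnly { target_servers, .. })
        | Some(ClickhousePolicy::Both { target_servers, .. }) => *target_servers as usize,
        _ => 0,
    }
}

fn keeper_zone_count(policy: Option<&ClickhousePolicy>) -> usize {
    match policy {
        Some(ClickhousePolicy::ClusterOnly { target_keepers, .. })
        | Some(ClickhousePolicy::Both { target_keepers, .. }) => *target_keepers as usize,
        _ => 0,
    }
}

fn main() {
    let policy = ClickhousePolicy::Both { target_servers: 3, target_keepers: 5 };
    assert_eq!(single_node_zone_count(Some(&policy)), SINGLE_NODE_REDUNDANCY);
    assert_eq!(server_zone_count(Some(&policy)), 3);
    assert_eq!(keeper_zone_count(Some(&policy)), 5);
    assert_eq!(server_zone_count(None), 0);
    println!("ok");
}
```
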
8 changes: 0 additions & 8 deletions schema/crdb/clickhouse-policy/up.sql

This file was deleted.

5 changes: 5 additions & 0 deletions schema/crdb/clickhouse-policy/up1.sql
@@ -0,0 +1,5 @@
CREATE TYPE IF NOT EXISTS omicron.public.clickhouse_mode AS ENUM (
'single_node_only',
'cluster_only',
'both'
);
7 changes: 7 additions & 0 deletions schema/crdb/clickhouse-policy/up2.sql
@@ -0,0 +1,7 @@
CREATE TABLE IF NOT EXISTS omicron.public.clickhouse_policy (
version INT8 PRIMARY KEY,
clickhouse_mode omicron.public.clickhouse_mode NOT NULL,
clickhouse_cluster_target_servers INT2 NOT NULL,
clickhouse_cluster_target_keepers INT2 NOT NULL,
time_created TIMESTAMPTZ NOT NULL
);