Commit: Tests

smklein committed Jul 22, 2024
1 parent a80313d commit 7193d1b
Showing 5 changed files with 316 additions and 15 deletions.
198 changes: 198 additions & 0 deletions schema/omicron-datasets.json
@@ -0,0 +1,198 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "DatasetsConfig",
"type": "object",
"required": [
"datasets",
"generation"
],
"properties": {
"datasets": {
"type": "array",
"items": {
"$ref": "#/definitions/DatasetConfig"
}
},
"generation": {
"description": "generation number of this configuration\n\nThis generation number is owned by the control plane (i.e., RSS or Nexus, depending on whether RSS-to-Nexus handoff has happened). It should not be bumped within Sled Agent.\n\nSled Agent rejects attempts to set the configuration to a generation older than the one it's currently running.",
"allOf": [
{
"$ref": "#/definitions/Generation"
}
]
}
},
"definitions": {
"DatasetConfig": {
"description": "Configuration information necessary to request a single dataset",
"type": "object",
"required": [
"id",
"name"
],
"properties": {
"compression": {
"description": "The compression mode to be supplied, if any",
"type": [
"string",
"null"
]
},
"id": {
"description": "The UUID of the dataset being requested",
"allOf": [
{
"$ref": "#/definitions/TypedUuidForDatasetKind"
}
]
},
"name": {
"description": "The dataset's name",
"allOf": [
{
"$ref": "#/definitions/DatasetName"
}
]
},
"quota": {
"description": "The upper bound on the amount of storage used by this dataset",
"type": [
"integer",
"null"
],
"format": "uint",
"minimum": 0.0
},
"reservation": {
"description": "The lower bound on the amount of storage usable by this dataset",
"type": [
"integer",
"null"
],
"format": "uint",
"minimum": 0.0
}
}
},
"DatasetKind": {
"description": "The type of a dataset, and an auxiliary information necessary to successfully launch a zone managing the associated data.",
"oneOf": [
{
"type": "object",
"required": [
"type"
],
"properties": {
"type": {
"type": "string",
"enum": [
"cockroach_db"
]
}
}
},
{
"type": "object",
"required": [
"type"
],
"properties": {
"type": {
"type": "string",
"enum": [
"crucible"
]
}
}
},
{
"type": "object",
"required": [
"type"
],
"properties": {
"type": {
"type": "string",
"enum": [
"clickhouse"
]
}
}
},
{
"type": "object",
"required": [
"type"
],
"properties": {
"type": {
"type": "string",
"enum": [
"clickhouse_keeper"
]
}
}
},
{
"type": "object",
"required": [
"type"
],
"properties": {
"type": {
"type": "string",
"enum": [
"external_dns"
]
}
}
},
{
"type": "object",
"required": [
"type"
],
"properties": {
"type": {
"type": "string",
"enum": [
"internal_dns"
]
}
}
}
]
},
"DatasetName": {
"type": "object",
"required": [
"kind",
"pool_name"
],
"properties": {
"kind": {
"$ref": "#/definitions/DatasetKind"
},
"pool_name": {
"$ref": "#/definitions/ZpoolName"
}
}
},
"Generation": {
"description": "Generation numbers stored in the database, used for optimistic concurrency control",
"type": "integer",
"format": "uint64",
"minimum": 0.0
},
"TypedUuidForDatasetKind": {
"type": "string",
"format": "uuid"
},
"ZpoolName": {
"title": "The name of a Zpool",
"description": "Zpool names are of the format ox{i,p}_<UUID>. They are either Internal or External, and should be unique",
"type": "string",
"pattern": "^ox[ip]_[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
}
}
}
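
For reference, here is a minimal instance that should validate against this schema. The values are illustrative only: the UUIDs are made up, and the pool name simply follows the ZpoolName pattern above. Note the generation of 2; per the DatasetsConfig docs below, the first generation is reserved for the empty "no datasets" configuration.

    {
      "generation": 2,
      "datasets": [
        {
          "id": "5a37e397-6b60-4b6b-9327-4a4d0c0b7a0a",
          "name": {
            "pool_name": "oxp_1e3f3de5-4b8c-4dd4-a1e2-0ab9a8a538be",
            "kind": { "type": "crucible" }
          },
          "compression": null,
          "quota": null,
          "reservation": null
        }
      ]
    }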
4 changes: 4 additions & 0 deletions sled-storage/src/disk.rs
@@ -86,6 +86,10 @@ pub struct DatasetsConfig {
///
/// Sled Agent rejects attempts to set the configuration to a generation
/// older than the one it's currently running.
///
/// Note that `Generation::new()` (i.e., the first generation number)
/// is reserved to mean "no datasets". This is the default configuration
/// for a sled before any requests have been made.
pub generation: Generation,

pub datasets: Vec<DatasetConfig>,
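A sketch of how a control-plane caller might honor that reservation (assumed usage, not code from this commit): start from `Generation::new()` and bump past it before issuing the first real request.

    // `Generation::new()` is the reserved "no datasets" generation, so
    // the first meaningful configuration uses the next one.
    let config = DatasetsConfig {
        generation: Generation::new().next(),
        datasets: vec![/* DatasetConfig entries */],
    };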
8 changes: 4 additions & 4 deletions sled-storage/src/error.rs
@@ -84,10 +84,10 @@ pub enum Error {
},

#[error("Dataset configuration out-of-date (asked for {requested}, but latest is {current})")]
-    DatasetConfigurationOutdated {
-        requested: Generation,
-        current: Generation,
-    },
+    DatasetConfigurationOutdated { requested: Generation, current: Generation },
+
+    #[error("Dataset configuration changed for the same generation number: {generation}")]
+    DatasetConfigurationChanged { generation: Generation },

#[error("Failed to update ledger in internal storage")]
Ledger(#[from] omicron_common::ledger::Error),
110 changes: 99 additions & 11 deletions sled-storage/src/manager.rs
@@ -27,7 +27,7 @@ use omicron_common::ledger::Ledger;
use omicron_uuid_kinds::DatasetUuid;
use omicron_uuid_kinds::GenericUuid;
use sled_hardware::DiskVariant;
-use slog::{info, o, warn, Logger};
+use slog::{error, info, o, warn, Logger};
use std::future::Future;
use tokio::sync::{mpsc, oneshot, watch};
use tokio::time::{interval, Duration, MissedTickBehavior};
@@ -615,7 +615,9 @@ impl StorageManager {
self.resources.insert_or_update_disk(raw_disk).await
}

-    async fn load_disks_ledger(&self) -> Option<Ledger<OmicronPhysicalDisksConfig>> {
+    async fn load_disks_ledger(
+        &self,
+    ) -> Option<Ledger<OmicronPhysicalDisksConfig>> {
let ledger_paths = self.all_omicron_disk_ledgers().await;
let log = self.log.new(o!("request" => "load_disks_ledger"));
let maybe_ledger = Ledger::<OmicronPhysicalDisksConfig>::new(
@@ -701,12 +703,24 @@
requested: config.generation,
current: ledger_data.generation,
});
-            }
+            } else if config.generation == ledger_data.generation {
+                info!(
+                    log,
+                    "Requested generation number matches prior request",
+                );

-            // TODO: If the generation is equal, check that the values are
-            // also equal.
+                if ledger_data != &config {
+                    error!(log, "Requested configuration changed (with the same generation)");
+                    return Err(Error::DatasetConfigurationChanged {
+                        generation: config.generation,
+                    });
+                }
+            }

-            info!(log, "Request looks newer than prior requests");
+            info!(
+                log,
+                "Request looks newer than (or identical to) prior requests"
+            );
ledger
}
None => {
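Taken together, the checks above implement a three-way rule for an incoming config against the ledgered one. A standalone sketch of that rule (`check_generation` is a hypothetical helper shown for illustration, not part of this commit):

    fn check_generation(
        current: &DatasetsConfig,
        requested: &DatasetsConfig,
    ) -> Result<(), Error> {
        // Older generations are rejected outright.
        if requested.generation < current.generation {
            return Err(Error::DatasetConfigurationOutdated {
                requested: requested.generation,
                current: current.generation,
            });
        }
        // An equal generation must carry identical contents.
        if requested.generation == current.generation && requested != current {
            return Err(Error::DatasetConfigurationChanged {
                generation: requested.generation,
            });
        }
        // Anything newer (or an identical retransmit) is accepted.
        Ok(())
    }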
@@ -767,11 +781,8 @@
let log = self.log.new(o!("request" => "datasets_list"));

let ledger_paths = self.all_omicron_dataset_ledgers().await;
-        let maybe_ledger = Ledger::<DatasetsConfig>::new(
-            &log,
-            ledger_paths.clone(),
-        )
-        .await;
+        let maybe_ledger =
+            Ledger::<DatasetsConfig>::new(&log, ledger_paths.clone()).await;

match maybe_ledger {
Some(ledger) => {
@@ -1082,6 +1093,7 @@ mod tests {

use super::*;
use camino_tempfile::tempdir_in;
use omicron_common::api::external::Generation;
use omicron_common::ledger;
use omicron_test_utils::dev::test_setup_log;
use sled_hardware::DiskFirmware;
@@ -1560,6 +1572,82 @@
harness.cleanup().await;
logctx.cleanup_successful();
}

#[tokio::test]
async fn ensure_datasets() {
illumos_utils::USE_MOCKS.store(false, Ordering::SeqCst);
let logctx = test_setup_log("ensure_datasets");
let mut harness = StorageManagerTestHarness::new(&logctx.log).await;

// Test setup: Add a U.2 and M.2, adopt them into the "control plane"
// for usage.
harness.handle().key_manager_ready().await;
let raw_disks =
harness.add_vdevs(&["u2_under_test.vdev", "m2_helping.vdev"]).await;
let config = harness.make_config(1, &raw_disks);
let result = harness
.handle()
.omicron_physical_disks_ensure(config.clone())
.await
.expect("Ensuring disks should work after key manager is ready");
assert!(!result.has_error(), "{:?}", result);

// Create a dataset on the newly formatted U.2
let id = DatasetUuid::new_v4();
let zpool_name = ZpoolName::new_external(config.disks[0].pool_id);
let name = DatasetName::new(zpool_name.clone(), DatasetKind::Crucible);
let datasets = vec![DatasetConfig {
id,
name,
compression: None,
quota: None,
reservation: None,
}];
// "Generation = 1" is reserved as "no requests seen yet", so we jump
// past it.
let generation = Generation::new().next();
let mut config = DatasetsConfig { generation, datasets };

let status =
harness.handle().datasets_ensure(config.clone()).await.unwrap();
assert!(!status.has_error());

// List datasets, expect to see what we just created
let observed_config = harness.handle().datasets_list().await.unwrap();
assert_eq!(config, observed_config);

// Calling "datasets_ensure" with the same input should succeed.
let status =
harness.handle().datasets_ensure(config.clone()).await.unwrap();
assert!(!status.has_error());

let current_config_generation = config.generation;
let next_config_generation = config.generation.next();

// Calling "datasets_ensure" with an old generation should fail
config.generation = Generation::new();
let err =
harness.handle().datasets_ensure(config.clone()).await.unwrap_err();
assert!(matches!(err, Error::DatasetConfigurationOutdated { .. }));

// Calling it with the current generation number but modified input
// should also fail.
config.generation = current_config_generation;
config.datasets[0].reservation = Some(1024);
let err =
harness.handle().datasets_ensure(config.clone()).await.unwrap_err();
assert!(matches!(err, Error::DatasetConfigurationChanged { .. }));

// If we bump the generation number while making a change, updated
// configs will work.
config.generation = next_config_generation;
let status =
harness.handle().datasets_ensure(config.clone()).await.unwrap();
assert!(!status.has_error());

harness.cleanup().await;
logctx.cleanup_successful();
}
}

#[cfg(test)]
11 changes: 11 additions & 0 deletions sled-storage/src/resources.rs
@@ -74,6 +74,17 @@ pub struct DatasetsManagementResult {
pub status: Vec<DatasetManagementStatus>,
}

impl DatasetsManagementResult {
pub fn has_error(&self) -> bool {
for status in &self.status {
if status.err.is_some() {
return true;
}
}
false
}
}
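The same check can be phrased with an iterator adapter; a behavior-equivalent sketch of the method above, for comparison:

    pub fn has_error(&self) -> bool {
        self.status.iter().any(|status| status.err.is_some())
    }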

/// Identifies how a single disk management operation may have succeeded or
/// failed.
#[derive(Debug, JsonSchema, Serialize, Deserialize)]
