From 9e19496689ae7d0077e1a55240391a067b646c3c Mon Sep 17 00:00:00 2001
From: John Baublitz
Date: Mon, 6 Nov 2023 18:16:00 -0500
Subject: [PATCH] Add space for metadata in unencrypted use case

---
 src/bin/stratis-legacy-pool.rs                |   2 +
 .../strat_engine/backstore/backstore/v1.rs    |   1 +
 .../strat_engine/backstore/backstore/v2.rs    | 105 ++++++++++++++----
 src/engine/strat_engine/engine.rs             |   2 +-
 src/engine/strat_engine/pool/v1.rs            |   4 +-
 src/engine/strat_engine/pool/v2.rs            |  34 +++---
 src/engine/strat_engine/serde_structs.rs      |   3 +
 src/engine/strat_engine/thinpool/thinpool.rs  |   2 +-
 8 files changed, 106 insertions(+), 47 deletions(-)

diff --git a/src/bin/stratis-legacy-pool.rs b/src/bin/stratis-legacy-pool.rs
index 5b7081240fa..fb5dbaeea0f 100644
--- a/src/bin/stratis-legacy-pool.rs
+++ b/src/bin/stratis-legacy-pool.rs
@@ -108,6 +108,8 @@ fn parse_args() -> ParseReturn {
 }
 
 fn main() -> StratisResult<()> {
+    env_logger::init();
+
     let (name, devices, key_desc, clevis_info) = parse_args()?;
     let unowned = ProcessedPathInfos::try_from(
         devices
diff --git a/src/engine/strat_engine/backstore/backstore/v1.rs b/src/engine/strat_engine/backstore/backstore/v1.rs
index 9ccd36a310b..cc2f88ad3c8 100644
--- a/src/engine/strat_engine/backstore/backstore/v1.rs
+++ b/src/engine/strat_engine/backstore/backstore/v1.rs
@@ -979,6 +979,7 @@ impl Recordable<BackstoreSave> for Backstore {
             cache_tier: self.cache_tier.as_ref().map(|c| c.record()),
             cap: CapSave {
                 allocs: vec![(Sectors(0), self.next)],
+                meta_allocs: Vec::new(),
             },
             data_tier: self.data_tier.record(),
         }
diff --git a/src/engine/strat_engine/backstore/backstore/v2.rs b/src/engine/strat_engine/backstore/backstore/v2.rs
index e8f038d14f5..6e6a8f349a1 100644
--- a/src/engine/strat_engine/backstore/backstore/v2.rs
+++ b/src/engine/strat_engine/backstore/backstore/v2.rs
@@ -150,8 +150,10 @@ pub struct Backstore {
     /// Either encryption information for a handle to be created at a later time or
     /// handle for encryption layer in backstore.
     enc: Option<Either<EncryptionInfo, CryptHandle>>,
-    /// Index for managing allocation of cap device
-    next: Sectors,
+    /// Data allocations on the cap device.
+    allocs: Vec<(Sectors, Sectors)>,
+    /// Metadata allocations on the cap device.
+    meta_allocs: Vec<(Sectors, Sectors)>,
 }
 
 impl InternalBackstore for Backstore {
@@ -172,22 +174,13 @@ impl InternalBackstore for Backstore {
     }
 
     fn datatier_usable_size(&self) -> Sectors {
-        self.data_tier.usable_size()
-            - if self.enc.is_some() {
-                crypt_metadata_size().sectors()
-            } else {
-                Sectors(0)
-            }
+        self.data_tier.usable_size() - self.meta_allocs.iter().map(|(_, len)| *len).sum()
     }
 
     fn available_in_backstore(&self) -> Sectors {
         self.data_tier.usable_size()
-            - self.next
-            - if self.enc.is_some() {
-                crypt_metadata_size().sectors()
-            } else {
-                Sectors(0)
-            }
+            - self.allocs.iter().map(|(_, len)| *len).sum()
+            - self.meta_allocs.iter().map(|(_, len)| *len).sum()
     }
 
     fn alloc(
@@ -196,7 +189,7 @@ impl InternalBackstore for Backstore {
         sizes: &[Sectors],
     ) -> StratisResult<Option<Vec<(Sectors, Sectors)>>> {
         let total_required = sizes.iter().cloned().sum();
-        let available = self.size() - self.next;
+        let available = self.size() - self.calc_next();
         if available < total_required {
             if self.data_tier.alloc(sizes) {
                 self.extend_cap_device(pool_uuid)?;
@@ -209,8 +202,10 @@ impl InternalBackstore for Backstore {
 
         let mut chunks = Vec::new();
         for size in sizes {
-            chunks.push((self.next, *size));
-            self.next += *size;
+            let next = self.calc_next();
+            let seg = (next, *size);
+            chunks.push(seg);
+            self.allocs.push(seg);
         }
 
         // Assert that the postcondition holds.
@@ -228,6 +223,33 @@ impl InternalBackstore for Backstore {
 }
 
 impl Backstore {
+    /// Calculate size allocated to data and not metadata in the backstore.
+    #[cfg(test)]
+    pub fn data_alloc_size(&self) -> Sectors {
+        self.allocs.iter().map(|(_, length)| *length).sum()
+    }
+
+    /// Calculate next from all of the metadata and data allocations present in the backstore.
+    fn calc_next(&self) -> Sectors {
+        let mut all_allocs = self
+            .allocs
+            .iter()
+            .chain(self.meta_allocs.iter())
+            .collect::<Vec<_>>();
+        all_allocs.sort();
+
+        for window in all_allocs.windows(2) {
+            let (start, length) = (window[0].0, window[0].1);
+            let start_next = window[1].0;
+            assert_eq!(start + length, start_next);
+        }
+
+        all_allocs
+            .last()
+            .map(|(offset, len)| *offset + *len)
+            .unwrap_or(Sectors(0))
+    }
+
     /// Make a Backstore object from blockdevs that already belong to Stratis.
     /// Precondition: every device in datadevs and cachedevs has already been
     /// determined to belong to the pool with the specified pool_uuid.
@@ -353,7 +375,8 @@ impl Backstore {
             cache,
             placeholder,
             enc,
-            next: backstore_save.cap.allocs[0].1,
+            allocs: backstore_save.cap.allocs.clone(),
+            meta_allocs: backstore_save.cap.meta_allocs.clone(),
         })
     }
 
@@ -385,17 +408,54 @@ impl Backstore {
             cache: None,
             origin: None,
             enc: encryption_info.cloned().map(Either::Left),
-            next: Sectors(0),
+            allocs: Vec::new(),
+            meta_allocs: Vec::new(),
         };
 
         let size = crypt_metadata_size().sectors();
-        backstore.alloc(pool_uuid, &[size])?.ok_or_else(|| {
+        backstore.meta_alloc(pool_uuid, &[size])?.ok_or_else(|| {
             StratisError::Msg(format!("Failed to satisfy request in backstore for {size}"))
         })?;
 
         Ok(backstore)
     }
 
+    fn meta_alloc(
+        &mut self,
+        pool_uuid: PoolUuid,
+        sizes: &[Sectors],
+    ) -> StratisResult<Option<Vec<(Sectors, Sectors)>>> {
+        let total_required = sizes.iter().cloned().sum();
+        let available = self.size() - self.calc_next();
+        if available < total_required {
+            if self.data_tier.alloc(sizes) {
+                self.extend_cap_device(pool_uuid)?;
+            } else {
+                return Ok(None);
+            }
+        }
+
+        let mut chunks = Vec::new();
+        for size in sizes {
+            let next = self.calc_next();
+            let seg = (next, *size);
+            chunks.push(seg);
+            self.meta_allocs.push(seg);
+        }
+
+        // Assert that the postcondition holds.
+        assert_eq!(
+            sizes,
+            chunks
+                .iter()
+                .map(|x| x.1)
+                .collect::<Vec<_>>()
+                .as_slice()
+        );
+
+        Ok(Some(chunks))
+    }
+
     /// Initialize the cache tier and add cachedevs to the backstore.
     ///
     /// Returns all `DevUuid`s of devices that were added to the cache on initialization.
@@ -1051,7 +1111,8 @@ impl Recordable<BackstoreSave> for Backstore {
         BackstoreSave {
             cache_tier: self.cache_tier.as_ref().map(|c| c.record()),
             cap: CapSave {
-                allocs: vec![(Sectors(0), self.next)],
+                allocs: self.allocs.clone(),
+                meta_allocs: self.meta_allocs.clone(),
             },
             data_tier: self.data_tier.record(),
         }
@@ -1104,7 +1165,7 @@ mod tests {
                 _ => panic!("impossible; see first assertion"),
             }
         );
-        assert!(backstore.next <= backstore.size());
+        assert!(backstore.calc_next() <= backstore.size());
 
         backstore.data_tier.invariant();
 
diff --git a/src/engine/strat_engine/engine.rs b/src/engine/strat_engine/engine.rs
index 81530a5baa5..d74550483aa 100644
--- a/src/engine/strat_engine/engine.rs
+++ b/src/engine/strat_engine/engine.rs
@@ -98,7 +98,7 @@ impl StratEngine {
     }
 
     #[cfg(test)]
-    async fn create_pool_legacy(
+    pub(crate) async fn create_pool_legacy(
        &self,
        name: &str,
        blockdev_paths: &[&Path],
diff --git a/src/engine/strat_engine/pool/v1.rs b/src/engine/strat_engine/pool/v1.rs
index c3b061ca87b..1a9d135502c 100644
--- a/src/engine/strat_engine/pool/v1.rs
+++ b/src/engine/strat_engine/pool/v1.rs
@@ -1324,7 +1324,7 @@ mod tests {
             thinpool::ThinPoolStatusDigest,
         },
         types::{EngineAction, PoolIdentifier},
-        Engine, StratEngine,
+        StratEngine,
     };
 
     use super::*;
@@ -1741,7 +1741,7 @@ mod tests {
     fn test_grow_physical_pre_grow(paths: &[&Path]) {
         let pool_name = Name::new("pool".to_string());
         let engine = StratEngine::initialize().unwrap();
-        let pool_uuid = test_async!(engine.create_pool(&pool_name, paths, None))
+        let pool_uuid = test_async!(engine.create_pool_legacy(&pool_name, paths, None))
             .unwrap()
             .changed()
             .unwrap();
diff --git a/src/engine/strat_engine/pool/v2.rs b/src/engine/strat_engine/pool/v2.rs
index 48053f5e173..7a9e276446c 100644
--- a/src/engine/strat_engine/pool/v2.rs
+++ b/src/engine/strat_engine/pool/v2.rs
@@ -50,29 +50,15 @@ use crate::{
 /// Precondition: This method is called only when setting up a pool, which
 /// ensures that the flex devs metadata lists are all non-empty.
 fn next_index(flex_devs: &FlexDevsSave) -> Sectors {
-    let expect_msg = "Setting up rather than initializing a pool, so each flex dev must have been allocated at least some segments.";
     [
-        flex_devs
-            .meta_dev
-            .last()
-            .unwrap_or_else(|| panic!("{}", expect_msg)),
-        flex_devs
-            .thin_meta_dev
-            .last()
-            .unwrap_or_else(|| panic!("{}", expect_msg)),
-        flex_devs
-            .thin_data_dev
-            .last()
-            .unwrap_or_else(|| panic!("{}", expect_msg)),
-        flex_devs
-            .thin_meta_dev_spare
-            .last()
-            .unwrap_or_else(|| panic!("{}", expect_msg)),
+        &flex_devs.meta_dev,
+        &flex_devs.thin_meta_dev,
+        &flex_devs.thin_data_dev,
+        &flex_devs.thin_meta_dev_spare,
     ]
     .iter()
-    .max_by_key(|x| x.0)
-    .map(|&&(start, length)| start + length)
-    .expect("iterator is non-empty")
+    .flat_map(|vec| vec.iter().map(|(_, length)| *length))
+    .sum()
 }
 
 /// Check the metadata of an individual pool for consistency.
@@ -81,7 +67,13 @@ fn next_index(flex_devs: &FlexDevsSave) -> Sectors {
 fn check_metadata(metadata: &PoolSave) -> StratisResult<()> {
     let flex_devs = &metadata.flex_devs;
     let next = next_index(flex_devs);
-    let allocated_from_cap = metadata.backstore.cap.allocs[0].1;
+    let allocated_from_cap = metadata
+        .backstore
+        .cap
+        .allocs
+        .iter()
+        .map(|(_, size)| *size)
+        .sum::<Sectors>();
 
     if allocated_from_cap != next {
         let err_msg = format!(
diff --git a/src/engine/strat_engine/serde_structs.rs b/src/engine/strat_engine/serde_structs.rs
index f77693509ab..5f0830e9ca9 100644
--- a/src/engine/strat_engine/serde_structs.rs
+++ b/src/engine/strat_engine/serde_structs.rs
@@ -78,6 +78,9 @@ pub struct BaseBlockDevSave {
 #[derive(Debug, Deserialize, Eq, PartialEq, Serialize)]
 pub struct CapSave {
     pub allocs: Vec<(Sectors, Sectors)>,
+    #[serde(skip_serializing_if = "Vec::is_empty")]
+    #[serde(default)]
+    pub meta_allocs: Vec<(Sectors, Sectors)>,
 }
 
 #[derive(Debug, Deserialize, Eq, PartialEq, Serialize)]
diff --git a/src/engine/strat_engine/thinpool/thinpool.rs b/src/engine/strat_engine/thinpool/thinpool.rs
index ce4245b13aa..8dec70fb44f 100644
--- a/src/engine/strat_engine/thinpool/thinpool.rs
+++ b/src/engine/strat_engine/thinpool/thinpool.rs
@@ -2929,7 +2929,7 @@ mod tests {
                 .unwrap()
         );
         assert_eq!(
-            backstore.datatier_allocated_size(),
+            backstore.data_alloc_size(),
             pool.thin_pool.data_dev().size()
                 + pool.thin_pool.meta_dev().size() * 2u64
                 + pool.mdv.device().size()
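
For readers skimming the diff: the heart of the change is that the cap device's single `next` high-water mark is replaced by two segment lists, `allocs` and `meta_allocs`, with the next free offset recomputed from them on demand. The crypt-metadata-sized region is now reserved through `meta_alloc` whether or not the pool is encrypted, and `CapSave` gains an optional `meta_allocs` field (skipped when empty, defaulted when absent) so existing pool-level metadata still deserializes. The standalone sketch below illustrates the offset arithmetic that `calc_next` and `data_alloc_size` rely on; it is not stratisd code, `Sectors` is simplified to `u64`, and the numbers are illustrative only.

```rust
// Standalone sketch (not stratisd code): models how the patched Backstore
// derives its "next free offset" from the data and metadata segment lists
// instead of a single running `next` counter. `Sectors` simplified to u64.
type Sectors = u64;

struct CapAllocs {
    /// Data allocations on the cap device: (offset, length) pairs.
    allocs: Vec<(Sectors, Sectors)>,
    /// Metadata allocations on the cap device: (offset, length) pairs.
    meta_allocs: Vec<(Sectors, Sectors)>,
}

impl CapAllocs {
    /// Next free offset: segments are laid out contiguously from 0, so it
    /// is the end of the highest segment, or 0 if nothing is allocated.
    fn calc_next(&self) -> Sectors {
        let mut all: Vec<_> = self.allocs.iter().chain(self.meta_allocs.iter()).collect();
        all.sort();
        // The contiguity invariant the patch asserts: each segment starts
        // exactly where the previous one ends.
        for w in all.windows(2) {
            assert_eq!(w[0].0 + w[0].1, w[1].0);
        }
        all.last().map(|(start, len)| start + len).unwrap_or(0)
    }
}

fn main() {
    // A pool that reserved 16 sectors of metadata space up front,
    // then made two data allocations.
    let cap = CapAllocs {
        allocs: vec![(16, 100), (116, 50)],
        meta_allocs: vec![(0, 16)],
    };
    assert_eq!(cap.calc_next(), 166);
    // Data-only usage, analogous to what data_alloc_size() reports.
    let data: Sectors = cap.allocs.iter().map(|(_, len)| *len).sum();
    assert_eq!(data, 150);
    println!("next = {}, data = {}", cap.calc_next(), data);
}
```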