diff --git a/src/bin/stratis-legacy-pool.rs b/src/bin/stratis-legacy-pool.rs index 5b7081240fa..fb5dbaeea0f 100644 --- a/src/bin/stratis-legacy-pool.rs +++ b/src/bin/stratis-legacy-pool.rs @@ -108,6 +108,8 @@ fn parse_args() -> ParseReturn { } fn main() -> StratisResult<()> { + env_logger::init(); + let (name, devices, key_desc, clevis_info) = parse_args()?; let unowned = ProcessedPathInfos::try_from( devices diff --git a/src/engine/strat_engine/backstore/backstore/v1.rs b/src/engine/strat_engine/backstore/backstore/v1.rs index 47c3f6d2cc7..9b0cdb4d2fa 100644 --- a/src/engine/strat_engine/backstore/backstore/v1.rs +++ b/src/engine/strat_engine/backstore/backstore/v1.rs @@ -979,6 +979,7 @@ impl Recordable for Backstore { cache_tier: self.cache_tier.as_ref().map(|c| c.record()), cap: CapSave { allocs: vec![(Sectors(0), self.next)], + crypt_meta_allocs: Vec::new(), }, data_tier: self.data_tier.record(), } diff --git a/src/engine/strat_engine/backstore/backstore/v2.rs b/src/engine/strat_engine/backstore/backstore/v2.rs index 62046e5d41c..8c125c108a4 100644 --- a/src/engine/strat_engine/backstore/backstore/v2.rs +++ b/src/engine/strat_engine/backstore/backstore/v2.rs @@ -150,8 +150,10 @@ pub struct Backstore { /// Either encryption information for a handle to be created at a later time or /// handle for encryption layer in backstore. enc: Option>, - /// Index for managing allocation of cap device - next: Sectors, + /// Data allocations on the cap device, + allocs: Vec<(Sectors, Sectors)>, + /// Metadata allocations on the cache or placeholder device. 
+ crypt_meta_allocs: Vec<(Sectors, Sectors)>, } impl InternalBackstore for Backstore { @@ -159,12 +161,8 @@ impl InternalBackstore for Backstore { self.enc .as_ref() .and_then(|either| either.as_ref().right().map(|h| h.device())) - .or_else(|| { - self.cache - .as_ref() - .map(|c| c.device()) - .or_else(|| self.placeholder.as_ref().map(|lin| lin.device())) - }) + .or_else(|| self.cache.as_ref().map(|c| c.device())) + .or_else(|| self.placeholder.as_ref().map(|lin| lin.device())) } fn datatier_allocated_size(&self) -> Sectors { @@ -172,22 +170,13 @@ impl InternalBackstore for Backstore { } fn datatier_usable_size(&self) -> Sectors { - self.data_tier.usable_size() - - if self.enc.is_some() { - crypt_metadata_size().sectors() - } else { - Sectors(0) - } + self.data_tier.usable_size() - self.crypt_meta_allocs.iter().map(|(_, len)| *len).sum() } fn available_in_backstore(&self) -> Sectors { self.data_tier.usable_size() - - self.next - - if self.enc.is_some() { - crypt_metadata_size().sectors() - } else { - Sectors(0) - } + - self.allocs.iter().map(|(_, len)| *len).sum() + - self.crypt_meta_allocs.iter().map(|(_, len)| *len).sum() } fn alloc( @@ -208,8 +197,10 @@ impl InternalBackstore for Backstore { let mut chunks = Vec::new(); for size in sizes { - chunks.push((self.next, *size)); - self.next += *size; + let next = self.calc_next_cap(); + let seg = (next, *size); + chunks.push(seg); + self.allocs.push(seg); } // Assert that the postcondition holds. @@ -227,6 +218,68 @@ impl InternalBackstore for Backstore { } impl Backstore { + /// Calculate size allocated to data and not metadata in the backstore. + #[cfg(test)] + pub fn data_alloc_size(&self) -> Sectors { + self.allocs.iter().map(|(_, length)| *length).sum() + } + + /// Calculate next from all of the metadata and data allocations present in the backstore. 
+ fn calc_next_cache(&self) -> StratisResult<Sectors> { + let mut all_allocs = if self.allocs.is_empty() { + if matches!(self.enc, Some(Either::Right(_))) { + return Err(StratisError::Msg( + "Metadata can only be allocated at the beginning of the cache device before the encryption device".to_string() + )); + } else { + self.crypt_meta_allocs.clone() + } + } else { + return Err(StratisError::Msg( + "Metadata can only be allocated at the beginning of the cache device before the encryption device".to_string() + )); + }; + all_allocs.sort(); + + for window in all_allocs.windows(2) { + let (start, length) = (window[0].0, window[0].1); + let start_next = window[1].0; + assert_eq!(start + length, start_next); + } + + Ok(all_allocs + .last() + .map(|(offset, len)| *offset + *len) + .unwrap_or(Sectors(0))) + } + + /// Calculate next from all of the metadata and data allocations present in the backstore. + fn calc_next_cap(&self) -> Sectors { + let mut all_allocs = if self.is_encrypted() { + self.allocs.clone() + } else { + self.allocs + .iter() + .cloned() + .chain(self.crypt_meta_allocs.iter().cloned()) + .collect::<Vec<_>>() + }; + all_allocs.sort(); + + for window in all_allocs.windows(2) { + let (start, length) = (window[0].0, window[0].1); + let start_next = window[1].0; + assert_eq!(start + length, start_next); + } + + let next = all_allocs + .last() + .map(|(offset, len)| *offset + *len) + .unwrap_or(Sectors(0)); + debug!("Next: {next}"); + next + } + /// Make a Backstore object from blockdevs that already belong to Stratis. /// Precondition: every device in datadevs and cachedevs has already been /// determined to belong to the pool with the specified pool_uuid. 
@@ -352,7 +405,8 @@ impl Backstore { cache, placeholder, enc, - next: backstore_save.cap.allocs[0].1, + allocs: backstore_save.cap.allocs.clone(), + crypt_meta_allocs: backstore_save.cap.crypt_meta_allocs.clone(), }) } @@ -384,17 +438,52 @@ impl Backstore { cache: None, origin: None, enc: encryption_info.cloned().map(Either::Left), - next: Sectors(0), + allocs: Vec::new(), + crypt_meta_allocs: Vec::new(), }; let size = crypt_metadata_size().sectors(); - backstore.alloc(pool_uuid, &[size])?.ok_or_else(|| { - StratisError::Msg(format!("Failed to satisfy request in backstore for {size}")) - })?; + if !backstore.meta_alloc_cache(&[size])? { + return Err(StratisError::Msg(format!( + "Failed to satisfy request in backstore for {size}" + ))); + } Ok(backstore) } + fn meta_alloc_cache(&mut self, sizes: &[Sectors]) -> StratisResult<bool> { + let total_required = sizes.iter().cloned().sum(); + let available = self.available_in_backstore(); + if available < total_required { + return Ok(false); + } + + if !self.data_tier.alloc(sizes) { + return Ok(false); + } + + let mut chunks = Vec::new(); + for size in sizes { + let next = self.calc_next_cache()?; + let seg = (next, *size); + chunks.push(seg); + self.crypt_meta_allocs.push(seg); + } + + // Assert that the postcondition holds. + assert_eq!( + sizes, + chunks + .iter() + .map(|x| x.1) + .collect::<Vec<_>>() + .as_slice() + ); + + Ok(true) + } + /// Initialize the cache tier and add cachedevs to the backstore. /// /// Returns all `DevUuid`s of devices that were added to the cache on initialization. 
@@ -513,7 +602,7 @@ impl Backstore { self.cache.as_mut(), self.placeholder .as_mut() - .and_then(|c| self.origin.as_mut().map(|l| (c, l))), + .and_then(|p| self.origin.as_mut().map(|o| (p, o))), self.enc.as_mut(), ) { (None, None, None) => true, @@ -531,20 +620,20 @@ impl Backstore { cache.resume(get_dm())?; false } - (None, Some((cap, linear)), Some(Either::Right(handle))) => { + (None, Some((placeholder, origin)), Some(Either::Right(handle))) => { let table = self.data_tier.segments.map_to_dm(); - linear.set_table(get_dm(), table)?; - linear.resume(get_dm())?; + origin.set_table(get_dm(), table)?; + origin.resume(get_dm())?; let table = vec![TargetLine::new( Sectors(0), - linear.size(), + origin.size(), LinearDevTargetParams::Linear(LinearTargetParams::new( - linear.device(), + origin.device(), Sectors(0), )), )]; - cap.set_table(get_dm(), table)?; - cap.resume(get_dm())?; + placeholder.set_table(get_dm(), table)?; + placeholder.resume(get_dm())?; handle.resize(None)?; false } @@ -1051,7 +1140,8 @@ impl Recordable for Backstore { BackstoreSave { cache_tier: self.cache_tier.as_ref().map(|c| c.record()), cap: CapSave { - allocs: vec![(Sectors(0), self.next)], + allocs: self.allocs.clone(), + crypt_meta_allocs: self.crypt_meta_allocs.clone(), }, data_tier: self.data_tier.record(), } @@ -1093,18 +1183,20 @@ mod tests { assert_eq!( backstore.data_tier.allocated(), match (&backstore.origin, &backstore.cache) { - (None, None) => - if backstore.is_encrypted() { - crypt_metadata_size().sectors() - } else { - Sectors(0) - }, + (None, None) => crypt_metadata_size().sectors(), (&None, Some(cache)) => cache.size(), (Some(linear), &None) => linear.size(), _ => panic!("impossible; see first assertion"), } ); - assert!(backstore.next <= backstore.size()); + assert!( + backstore + .allocs + .iter() + .map(|(_, len)| *len) + .sum::() + <= backstore.size() + ); backstore.data_tier.invariant(); diff --git a/src/engine/strat_engine/engine.rs b/src/engine/strat_engine/engine.rs index 
81530a5baa5..d74550483aa 100644 --- a/src/engine/strat_engine/engine.rs +++ b/src/engine/strat_engine/engine.rs @@ -98,7 +98,7 @@ impl StratEngine { } #[cfg(test)] - async fn create_pool_legacy( + pub(crate) async fn create_pool_legacy( &self, name: &str, blockdev_paths: &[&Path], diff --git a/src/engine/strat_engine/pool/v1.rs b/src/engine/strat_engine/pool/v1.rs index d373dd616d3..af6ee02290b 100644 --- a/src/engine/strat_engine/pool/v1.rs +++ b/src/engine/strat_engine/pool/v1.rs @@ -1345,7 +1345,7 @@ mod tests { thinpool::ThinPoolStatusDigest, }, types::{EngineAction, PoolIdentifier}, - Engine, StratEngine, + StratEngine, }; use super::*; @@ -1764,7 +1764,7 @@ mod tests { fn test_grow_physical_pre_grow(paths: &[&Path]) { let pool_name = Name::new("pool".to_string()); let engine = StratEngine::initialize().unwrap(); - let pool_uuid = test_async!(engine.create_pool(&pool_name, paths, None)) + let pool_uuid = test_async!(engine.create_pool_legacy(&pool_name, paths, None)) .unwrap() .changed() .unwrap(); diff --git a/src/engine/strat_engine/pool/v2.rs b/src/engine/strat_engine/pool/v2.rs index 103842edeb8..ce4fe258165 100644 --- a/src/engine/strat_engine/pool/v2.rs +++ b/src/engine/strat_engine/pool/v2.rs @@ -50,29 +50,15 @@ use crate::{ /// Precondition: This method is called only when setting up a pool, which /// ensures that the flex devs metadata lists are all non-empty. 
fn next_index(flex_devs: &FlexDevsSave) -> Sectors { - let expect_msg = "Setting up rather than initializing a pool, so each flex dev must have been allocated at least some segments."; [ - flex_devs - .meta_dev - .last() - .unwrap_or_else(|| panic!("{}", expect_msg)), - flex_devs - .thin_meta_dev - .last() - .unwrap_or_else(|| panic!("{}", expect_msg)), - flex_devs - .thin_data_dev - .last() - .unwrap_or_else(|| panic!("{}", expect_msg)), - flex_devs - .thin_meta_dev_spare - .last() - .unwrap_or_else(|| panic!("{}", expect_msg)), + &flex_devs.meta_dev, + &flex_devs.thin_meta_dev, + &flex_devs.thin_data_dev, + &flex_devs.thin_meta_dev_spare, ] .iter() - .max_by_key(|x| x.0) - .map(|&&(start, length)| start + length) - .expect("iterator is non-empty") + .flat_map(|vec| vec.iter().map(|(_, length)| *length)) + .sum() } /// Check the metadata of an individual pool for consistency. @@ -81,7 +67,13 @@ fn next_index(flex_devs: &FlexDevsSave) -> Sectors { fn check_metadata(metadata: &PoolSave) -> StratisResult<()> { let flex_devs = &metadata.flex_devs; let next = next_index(flex_devs); - let allocated_from_cap = metadata.backstore.cap.allocs[0].1; + let allocated_from_cap = metadata + .backstore + .cap + .allocs + .iter() + .map(|(_, size)| *size) + .sum::(); if allocated_from_cap != next { let err_msg = format!( @@ -168,7 +160,7 @@ impl StratPool { let thinpool = ThinPool::::new( pool_uuid, - match ThinPoolSizeParams::new(backstore.datatier_usable_size()) { + match ThinPoolSizeParams::new(backstore.available_in_backstore()) { Ok(ref params) => params, Err(causal_error) => { if let Err(cleanup_err) = backstore.destroy(pool_uuid) { diff --git a/src/engine/strat_engine/serde_structs.rs b/src/engine/strat_engine/serde_structs.rs index 66c2c813e97..1820425e65e 100644 --- a/src/engine/strat_engine/serde_structs.rs +++ b/src/engine/strat_engine/serde_structs.rs @@ -127,6 +127,9 @@ pub struct BaseBlockDevSave { #[derive(Debug, Deserialize, Eq, PartialEq, Serialize)] pub struct 
CapSave { pub allocs: Vec<(Sectors, Sectors)>, + #[serde(skip_serializing_if = "Vec::is_empty")] + #[serde(default)] + pub crypt_meta_allocs: Vec<(Sectors, Sectors)>, } #[derive(Debug, Deserialize, Eq, PartialEq, Serialize)] diff --git a/src/engine/strat_engine/thinpool/thinpool.rs b/src/engine/strat_engine/thinpool/thinpool.rs index 47fbc220352..6b22f4f9f6f 100644 --- a/src/engine/strat_engine/thinpool/thinpool.rs +++ b/src/engine/strat_engine/thinpool/thinpool.rs @@ -2951,7 +2951,7 @@ mod tests { .unwrap() ); assert_eq!( - backstore.datatier_allocated_size(), + backstore.data_alloc_size(), pool.thin_pool.data_dev().size() + pool.thin_pool.meta_dev().size() * 2u64 + pool.mdv.device().size()