Skip to content

Commit

Permalink
Add space for metadata in unencrypted use case
Browse files Browse the repository at this point in the history
  • Loading branch information
jbaublitz committed Nov 13, 2023
1 parent 47defae commit a374abd
Show file tree
Hide file tree
Showing 8 changed files with 150 additions and 69 deletions.
2 changes: 2 additions & 0 deletions src/bin/stratis-legacy-pool.rs
Original file line number Diff line number Diff line change
Expand Up @@ -108,6 +108,8 @@ fn parse_args() -> ParseReturn {
}

fn main() -> StratisResult<()> {
env_logger::init();

let (name, devices, key_desc, clevis_info) = parse_args()?;
let unowned = ProcessedPathInfos::try_from(
devices
Expand Down
1 change: 1 addition & 0 deletions src/engine/strat_engine/backstore/backstore/v1.rs
Original file line number Diff line number Diff line change
Expand Up @@ -979,6 +979,7 @@ impl Recordable<BackstoreSave> for Backstore {
cache_tier: self.cache_tier.as_ref().map(|c| c.record()),
cap: CapSave {
allocs: vec![(Sectors(0), self.next)],
cache_meta_allocs: Vec::new(),
},
data_tier: self.data_tier.record(),
}
Expand Down
169 changes: 126 additions & 43 deletions src/engine/strat_engine/backstore/backstore/v2.rs
Original file line number Diff line number Diff line change
Expand Up @@ -150,44 +150,33 @@ pub struct Backstore {
/// Either encryption information for a handle to be created at a later time or
/// handle for encryption layer in backstore.
enc: Option<Either<EncryptionInfo, CryptHandle>>,
/// Index for managing allocation of cap device
next: Sectors,
/// Data allocations on the cap device,
allocs: Vec<(Sectors, Sectors)>,
/// Metadata allocations on the cache or placeholder device.
cache_meta_allocs: Vec<(Sectors, Sectors)>,
}

impl InternalBackstore for Backstore {
fn device(&self) -> Option<Device> {
self.enc
.as_ref()
.and_then(|either| either.as_ref().right().map(|h| h.device()))
.or_else(|| {
self.cache
.as_ref()
.map(|c| c.device())
.or_else(|| self.placeholder.as_ref().map(|lin| lin.device()))
})
.or_else(|| self.cache.as_ref().map(|c| c.device()))
.or_else(|| self.placeholder.as_ref().map(|lin| lin.device()))
}

/// Number of sectors currently allocated from the data tier's devices.
/// Delegates directly to the data tier's own accounting.
fn datatier_allocated_size(&self) -> Sectors {
    self.data_tier.allocated()
}

fn datatier_usable_size(&self) -> Sectors {
self.data_tier.usable_size()
- if self.enc.is_some() {
crypt_metadata_size().sectors()
} else {
Sectors(0)
}
self.data_tier.usable_size() - self.cache_meta_allocs.iter().map(|(_, len)| *len).sum()
}

fn available_in_backstore(&self) -> Sectors {
self.data_tier.usable_size()
- self.next
- if self.enc.is_some() {
crypt_metadata_size().sectors()
} else {
Sectors(0)
}
- self.allocs.iter().map(|(_, len)| *len).sum()
- self.cache_meta_allocs.iter().map(|(_, len)| *len).sum()
}

fn alloc(
Expand All @@ -208,8 +197,10 @@ impl InternalBackstore for Backstore {

let mut chunks = Vec::new();
for size in sizes {
chunks.push((self.next, *size));
self.next += *size;
let next = self.calc_next_cap();
let seg = (next, *size);
chunks.push(seg);
self.allocs.push(seg);
}

// Assert that the postcondition holds.
Expand All @@ -227,6 +218,66 @@ impl InternalBackstore for Backstore {
}

impl Backstore {
/// Total size of data allocations (excluding metadata allocations)
/// in the backstore.
#[cfg(test)]
pub fn data_alloc_size(&self) -> Sectors {
    self.allocs.iter().map(|&(_, length)| length).sum()
}

/// Compute the next free offset for a metadata allocation on the cache
/// or placeholder device.
///
/// Errors if any data allocation already exists, or if a live encryption
/// handle is present: metadata can only be placed at the very beginning
/// of the device, below the encryption layer.
///
/// Asserts that the existing metadata allocations, once sorted, are
/// contiguous (each segment starts exactly where the previous one ends).
fn calc_next_cache(&self) -> StratisResult<Sectors> {
    // Guard clause: disallow metadata allocation once data has been
    // allocated or the encryption device has been set up.
    if !self.allocs.is_empty() || matches!(self.enc, Some(Either::Right(_))) {
        return Err(StratisError::Msg(
            "Metadata can only be allocated at the beginning of the cache device before the encryption device".to_string()
        ));
    }

    let mut all_allocs = self.cache_meta_allocs.clone();
    all_allocs.sort();

    // Invariant check: no holes between consecutive allocations.
    for pair in all_allocs.windows(2) {
        let (start, length) = pair[0];
        assert_eq!(start + length, pair[1].0);
    }

    // Next offset is the end of the last allocation, or 0 if none exist.
    Ok(all_allocs
        .last()
        .map(|&(start, length)| start + length)
        .unwrap_or(Sectors(0)))
}

/// Compute the next free offset on the cap device from all existing
/// allocations.
///
/// When the backstore is unencrypted, cache metadata allocations share
/// the cap device's address space and are counted together with data
/// allocations; when encrypted, only the data allocations are counted.
///
/// Asserts that the sorted allocations are contiguous (no gaps).
fn calc_next_cap(&self) -> Sectors {
    let mut all_allocs = self.allocs.clone();
    if !self.is_encrypted() {
        all_allocs.extend(self.cache_meta_allocs.iter().cloned());
    }
    all_allocs.sort();

    // Invariant check: no holes between consecutive allocations.
    for pair in all_allocs.windows(2) {
        let (start, length) = pair[0];
        assert_eq!(start + length, pair[1].0);
    }

    // Next offset is the end of the last allocation, or 0 if none exist.
    all_allocs
        .last()
        .map(|&(start, length)| start + length)
        .unwrap_or(Sectors(0))
}

/// Make a Backstore object from blockdevs that already belong to Stratis.
/// Precondition: every device in datadevs and cachedevs has already been
/// determined to belong to the pool with the specified pool_uuid.
Expand Down Expand Up @@ -352,7 +403,8 @@ impl Backstore {
cache,
placeholder,
enc,
next: backstore_save.cap.allocs[0].1,
allocs: backstore_save.cap.allocs.clone(),
cache_meta_allocs: backstore_save.cap.cache_meta_allocs.clone(),
})
}

Expand Down Expand Up @@ -384,17 +436,52 @@ impl Backstore {
cache: None,
origin: None,
enc: encryption_info.cloned().map(Either::Left),
next: Sectors(0),
allocs: Vec::new(),
cache_meta_allocs: Vec::new(),
};

let size = crypt_metadata_size().sectors();
backstore.alloc(pool_uuid, &[size])?.ok_or_else(|| {
StratisError::Msg(format!("Failed to satisfy request in backstore for {size}"))
})?;
if !backstore.meta_alloc_cache(&[size])? {
return Err(StratisError::Msg(format!(
"Failed to satisfy request in backstore for {size}"
)));
}

Ok(backstore)
}

/// Allocate segments for metadata on the cache or placeholder device.
///
/// Returns `Ok(false)` when the request cannot be satisfied — either
/// the backstore lacks the total space required or the data tier
/// refuses the allocation. Returns `Ok(true)` once all requested
/// segments have been recorded in `cache_meta_allocs`.
fn meta_alloc_cache(&mut self, sizes: &[Sectors]) -> StratisResult<bool> {
    let total_required: Sectors = sizes.iter().cloned().sum();
    if self.available_in_backstore() < total_required {
        return Ok(false);
    }

    if !self.data_tier.alloc(sizes) {
        return Ok(false);
    }

    let mut chunks = Vec::with_capacity(sizes.len());
    for &size in sizes {
        // calc_next_cache errors if data allocations already exist or
        // the encryption device has been set up.
        let seg = (self.calc_next_cache()?, size);
        self.cache_meta_allocs.push(seg);
        chunks.push(seg);
    }

    // Postcondition: exactly one chunk of the requested length per request.
    assert_eq!(
        sizes,
        chunks
            .iter()
            .map(|chunk| chunk.1)
            .collect::<Vec<Sectors>>()
            .as_slice()
    );

    Ok(true)
}

/// Initialize the cache tier and add cachedevs to the backstore.
///
/// Returns all `DevUuid`s of devices that were added to the cache on initialization.
Expand Down Expand Up @@ -513,7 +600,7 @@ impl Backstore {
self.cache.as_mut(),
self.placeholder
.as_mut()
.and_then(|c| self.origin.as_mut().map(|l| (c, l))),
.and_then(|p| self.origin.as_mut().map(|o| (p, o))),
self.enc.as_mut(),
) {
(None, None, None) => true,
Expand All @@ -531,20 +618,20 @@ impl Backstore {
cache.resume(get_dm())?;
false
}
(None, Some((cap, linear)), Some(Either::Right(handle))) => {
(None, Some((placeholder, origin)), Some(Either::Right(handle))) => {
let table = self.data_tier.segments.map_to_dm();
linear.set_table(get_dm(), table)?;
linear.resume(get_dm())?;
origin.set_table(get_dm(), table)?;
origin.resume(get_dm())?;
let table = vec![TargetLine::new(
Sectors(0),
linear.size(),
origin.size(),
LinearDevTargetParams::Linear(LinearTargetParams::new(
linear.device(),
origin.device(),
Sectors(0),
)),
)];
cap.set_table(get_dm(), table)?;
cap.resume(get_dm())?;
placeholder.set_table(get_dm(), table)?;
placeholder.resume(get_dm())?;
handle.resize(None)?;
false
}
Expand Down Expand Up @@ -1051,7 +1138,8 @@ impl Recordable<BackstoreSave> for Backstore {
BackstoreSave {
cache_tier: self.cache_tier.as_ref().map(|c| c.record()),
cap: CapSave {
allocs: vec![(Sectors(0), self.next)],
allocs: self.allocs.clone(),
cache_meta_allocs: self.cache_meta_allocs.clone(),
},
data_tier: self.data_tier.record(),
}
Expand Down Expand Up @@ -1093,18 +1181,13 @@ mod tests {
assert_eq!(
backstore.data_tier.allocated(),
match (&backstore.origin, &backstore.cache) {
(None, None) =>
if backstore.is_encrypted() {
crypt_metadata_size().sectors()
} else {
Sectors(0)
},
(None, None) => crypt_metadata_size().sectors(),
(&None, Some(cache)) => cache.size(),
(Some(linear), &None) => linear.size(),
_ => panic!("impossible; see first assertion"),
}
);
assert!(backstore.next <= backstore.size());
assert!(backstore.calc_next_cap() <= backstore.size());

backstore.data_tier.invariant();

Expand Down
2 changes: 1 addition & 1 deletion src/engine/strat_engine/engine.rs
Original file line number Diff line number Diff line change
Expand Up @@ -98,7 +98,7 @@ impl StratEngine {
}

#[cfg(test)]
async fn create_pool_legacy(
pub(crate) async fn create_pool_legacy(
&self,
name: &str,
blockdev_paths: &[&Path],
Expand Down
4 changes: 2 additions & 2 deletions src/engine/strat_engine/pool/v1.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1324,7 +1324,7 @@ mod tests {
thinpool::ThinPoolStatusDigest,
},
types::{EngineAction, PoolIdentifier},
Engine, StratEngine,
StratEngine,
};

use super::*;
Expand Down Expand Up @@ -1741,7 +1741,7 @@ mod tests {
fn test_grow_physical_pre_grow(paths: &[&Path]) {
let pool_name = Name::new("pool".to_string());
let engine = StratEngine::initialize().unwrap();
let pool_uuid = test_async!(engine.create_pool(&pool_name, paths, None))
let pool_uuid = test_async!(engine.create_pool_legacy(&pool_name, paths, None))
.unwrap()
.changed()
.unwrap();
Expand Down
36 changes: 14 additions & 22 deletions src/engine/strat_engine/pool/v2.rs
Original file line number Diff line number Diff line change
Expand Up @@ -50,29 +50,15 @@ use crate::{
/// Precondition: This method is called only when setting up a pool, which
/// ensures that the flex devs metadata lists are all non-empty.
fn next_index(flex_devs: &FlexDevsSave) -> Sectors {
let expect_msg = "Setting up rather than initializing a pool, so each flex dev must have been allocated at least some segments.";
[
flex_devs
.meta_dev
.last()
.unwrap_or_else(|| panic!("{}", expect_msg)),
flex_devs
.thin_meta_dev
.last()
.unwrap_or_else(|| panic!("{}", expect_msg)),
flex_devs
.thin_data_dev
.last()
.unwrap_or_else(|| panic!("{}", expect_msg)),
flex_devs
.thin_meta_dev_spare
.last()
.unwrap_or_else(|| panic!("{}", expect_msg)),
&flex_devs.meta_dev,
&flex_devs.thin_meta_dev,
&flex_devs.thin_data_dev,
&flex_devs.thin_meta_dev_spare,
]
.iter()
.max_by_key(|x| x.0)
.map(|&&(start, length)| start + length)
.expect("iterator is non-empty")
.flat_map(|vec| vec.iter().map(|(_, length)| *length))
.sum()
}

/// Check the metadata of an individual pool for consistency.
Expand All @@ -81,7 +67,13 @@ fn next_index(flex_devs: &FlexDevsSave) -> Sectors {
fn check_metadata(metadata: &PoolSave) -> StratisResult<()> {
let flex_devs = &metadata.flex_devs;
let next = next_index(flex_devs);
let allocated_from_cap = metadata.backstore.cap.allocs[0].1;
let allocated_from_cap = metadata
.backstore
.cap
.allocs
.iter()
.map(|(_, size)| *size)
.sum::<Sectors>();

if allocated_from_cap != next {
let err_msg = format!(
Expand Down Expand Up @@ -168,7 +160,7 @@ impl StratPool {

let thinpool = ThinPool::<Backstore>::new(
pool_uuid,
match ThinPoolSizeParams::new(backstore.datatier_usable_size()) {
match ThinPoolSizeParams::new(backstore.available_in_backstore()) {
Ok(ref params) => params,
Err(causal_error) => {
if let Err(cleanup_err) = backstore.destroy(pool_uuid) {
Expand Down
3 changes: 3 additions & 0 deletions src/engine/strat_engine/serde_structs.rs
Original file line number Diff line number Diff line change
Expand Up @@ -78,6 +78,9 @@ pub struct BaseBlockDevSave {
#[derive(Debug, Deserialize, Eq, PartialEq, Serialize)]
pub struct CapSave {
    /// Data allocations as (offset, length) pairs on the cap device.
    pub allocs: Vec<(Sectors, Sectors)>,
    /// Metadata allocations on the cache or placeholder device.
    /// Skipped on serialization when empty, and defaulted to an empty
    /// Vec on deserialization, so pool metadata written without this
    /// field (presumably by older versions — confirm) still parses.
    #[serde(skip_serializing_if = "Vec::is_empty")]
    #[serde(default)]
    pub cache_meta_allocs: Vec<(Sectors, Sectors)>,
}

#[derive(Debug, Deserialize, Eq, PartialEq, Serialize)]
Expand Down
2 changes: 1 addition & 1 deletion src/engine/strat_engine/thinpool/thinpool.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2929,7 +2929,7 @@ mod tests {
.unwrap()
);
assert_eq!(
backstore.datatier_allocated_size(),
backstore.data_alloc_size(),
pool.thin_pool.data_dev().size()
+ pool.thin_pool.meta_dev().size() * 2u64
+ pool.mdv.device().size()
Expand Down

0 comments on commit a374abd

Please sign in to comment.