Skip to content

Commit

Permalink
Add space for metadata in unencrypted use case
Browse files Browse the repository at this point in the history
  • Loading branch information
jbaublitz committed Nov 8, 2023
1 parent 917f429 commit 9e19496
Show file tree
Hide file tree
Showing 8 changed files with 106 additions and 47 deletions.
2 changes: 2 additions & 0 deletions src/bin/stratis-legacy-pool.rs
Original file line number Diff line number Diff line change
Expand Up @@ -108,6 +108,8 @@ fn parse_args() -> ParseReturn {
}

fn main() -> StratisResult<()> {
env_logger::init();

let (name, devices, key_desc, clevis_info) = parse_args()?;
let unowned = ProcessedPathInfos::try_from(
devices
Expand Down
1 change: 1 addition & 0 deletions src/engine/strat_engine/backstore/backstore/v1.rs
Original file line number Diff line number Diff line change
Expand Up @@ -979,6 +979,7 @@ impl Recordable<BackstoreSave> for Backstore {
cache_tier: self.cache_tier.as_ref().map(|c| c.record()),
cap: CapSave {
allocs: vec![(Sectors(0), self.next)],
meta_allocs: Vec::new(),
},
data_tier: self.data_tier.record(),
}
Expand Down
105 changes: 83 additions & 22 deletions src/engine/strat_engine/backstore/backstore/v2.rs
Original file line number Diff line number Diff line change
Expand Up @@ -150,8 +150,10 @@ pub struct Backstore {
/// Either encryption information for a handle to be created at a later time or
/// handle for encryption layer in backstore.
enc: Option<Either<EncryptionInfo, CryptHandle>>,
/// Index for managing allocation of cap device
next: Sectors,
/// Data allocations on the cap device.
allocs: Vec<(Sectors, Sectors)>,
/// Metadata allocations on the cap device.
meta_allocs: Vec<(Sectors, Sectors)>,
}

impl InternalBackstore for Backstore {
Expand All @@ -172,22 +174,13 @@ impl InternalBackstore for Backstore {
}

fn datatier_usable_size(&self) -> Sectors {
self.data_tier.usable_size()
- if self.enc.is_some() {
crypt_metadata_size().sectors()
} else {
Sectors(0)
}
self.data_tier.usable_size() - self.meta_allocs.iter().map(|(_, len)| *len).sum()
}

fn available_in_backstore(&self) -> Sectors {
self.data_tier.usable_size()
- self.next
- if self.enc.is_some() {
crypt_metadata_size().sectors()
} else {
Sectors(0)
}
- self.allocs.iter().map(|(_, len)| *len).sum()
- self.meta_allocs.iter().map(|(_, len)| *len).sum()
}

fn alloc(
Expand All @@ -196,7 +189,7 @@ impl InternalBackstore for Backstore {
sizes: &[Sectors],
) -> StratisResult<Option<Vec<(Sectors, Sectors)>>> {
let total_required = sizes.iter().cloned().sum();
let available = self.size() - self.next;
let available = self.size() - self.calc_next();
if available < total_required {
if self.data_tier.alloc(sizes) {
self.extend_cap_device(pool_uuid)?;
Expand All @@ -209,8 +202,10 @@ impl InternalBackstore for Backstore {

let mut chunks = Vec::new();
for size in sizes {
chunks.push((self.next, *size));
self.next += *size;
let next = self.calc_next();
let seg = (next, *size);
chunks.push(seg);
self.allocs.push(seg);
}

// Assert that the postcondition holds.
Expand All @@ -228,6 +223,33 @@ impl InternalBackstore for Backstore {
}

impl Backstore {
/// Total size of the data (non-metadata) allocations in the backstore.
///
/// Sums the lengths of every recorded data segment on the cap device;
/// metadata segments are excluded. Test-only helper.
#[cfg(test)]
pub fn data_alloc_size(&self) -> Sectors {
    self.allocs
        .iter()
        .fold(Sectors(0), |total, (_, length)| total + *length)
}

/// Compute the next free offset on the cap device from all recorded
/// data and metadata allocations.
///
/// Panics (via assert) if the combined allocations, once ordered by
/// offset, are not exactly contiguous — each segment must end where
/// the next one begins.
fn calc_next(&self) -> Sectors {
    let mut segments = self
        .allocs
        .iter()
        .chain(self.meta_allocs.iter())
        .collect::<Vec<_>>();
    segments.sort();

    // Invariant check: no gaps and no overlaps between adjacent segments.
    for pair in segments.windows(2) {
        let (offset, length) = *pair[0];
        assert_eq!(offset + length, pair[1].0);
    }

    // Next free offset is the end of the last segment; an empty
    // backstore starts allocating at offset 0.
    match segments.last() {
        Some((offset, length)) => *offset + *length,
        None => Sectors(0),
    }
}

/// Make a Backstore object from blockdevs that already belong to Stratis.
/// Precondition: every device in datadevs and cachedevs has already been
/// determined to belong to the pool with the specified pool_uuid.
Expand Down Expand Up @@ -353,7 +375,8 @@ impl Backstore {
cache,
placeholder,
enc,
next: backstore_save.cap.allocs[0].1,
allocs: backstore_save.cap.allocs.clone(),
meta_allocs: backstore_save.cap.meta_allocs.clone(),
})
}

Expand Down Expand Up @@ -385,17 +408,54 @@ impl Backstore {
cache: None,
origin: None,
enc: encryption_info.cloned().map(Either::Left),
next: Sectors(0),
allocs: Vec::new(),
meta_allocs: Vec::new(),
};

let size = crypt_metadata_size().sectors();
backstore.alloc(pool_uuid, &[size])?.ok_or_else(|| {
backstore.meta_alloc(pool_uuid, &[size])?.ok_or_else(|| {
StratisError::Msg(format!("Failed to satisfy request in backstore for {size}"))
})?;

Ok(backstore)
}

/// Allocate segments for Stratis metadata on the cap device.
///
/// Behaves like `alloc` but records the segments in `meta_allocs`
/// rather than `allocs`, so metadata space is not counted against the
/// pool's usable data size. Used at backstore initialization to
/// reserve room for the crypt metadata header.
///
/// Returns the (offset, length) pairs of the new segments, or `None`
/// if the request could not be satisfied even after attempting to
/// extend the cap device from the data tier.
fn meta_alloc(
    &mut self,
    pool_uuid: PoolUuid,
    sizes: &[Sectors],
) -> StratisResult<Option<Vec<(Sectors, Sectors)>>> {
    let total_required: Sectors = sizes.iter().cloned().sum();
    if self.size() - self.calc_next() < total_required {
        // Not enough free space on the cap device; try to grow it
        // from the data tier before giving up.
        if !self.data_tier.alloc(sizes) {
            return Ok(None);
        }
        self.extend_cap_device(pool_uuid)?;
    }

    let mut chunks = Vec::with_capacity(sizes.len());
    for size in sizes.iter() {
        let segment = (self.calc_next(), *size);
        self.meta_allocs.push(segment);
        chunks.push(segment);
    }

    // Postcondition: the returned segments have exactly the requested
    // lengths, in order.
    assert_eq!(
        sizes,
        chunks
            .iter()
            .map(|seg| seg.1)
            .collect::<Vec<Sectors>>()
            .as_slice()
    );

    Ok(Some(chunks))
}

/// Initialize the cache tier and add cachedevs to the backstore.
///
/// Returns all `DevUuid`s of devices that were added to the cache on initialization.
Expand Down Expand Up @@ -1051,7 +1111,8 @@ impl Recordable<BackstoreSave> for Backstore {
BackstoreSave {
cache_tier: self.cache_tier.as_ref().map(|c| c.record()),
cap: CapSave {
allocs: vec![(Sectors(0), self.next)],
allocs: self.allocs.clone(),
meta_allocs: self.meta_allocs.clone(),
},
data_tier: self.data_tier.record(),
}
Expand Down Expand Up @@ -1104,7 +1165,7 @@ mod tests {
_ => panic!("impossible; see first assertion"),
}
);
assert!(backstore.next <= backstore.size());
assert!(backstore.calc_next() <= backstore.size());

backstore.data_tier.invariant();

Expand Down
2 changes: 1 addition & 1 deletion src/engine/strat_engine/engine.rs
Original file line number Diff line number Diff line change
Expand Up @@ -98,7 +98,7 @@ impl StratEngine {
}

#[cfg(test)]
async fn create_pool_legacy(
pub(crate) async fn create_pool_legacy(
&self,
name: &str,
blockdev_paths: &[&Path],
Expand Down
4 changes: 2 additions & 2 deletions src/engine/strat_engine/pool/v1.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1324,7 +1324,7 @@ mod tests {
thinpool::ThinPoolStatusDigest,
},
types::{EngineAction, PoolIdentifier},
Engine, StratEngine,
StratEngine,
};

use super::*;
Expand Down Expand Up @@ -1741,7 +1741,7 @@ mod tests {
fn test_grow_physical_pre_grow(paths: &[&Path]) {
let pool_name = Name::new("pool".to_string());
let engine = StratEngine::initialize().unwrap();
let pool_uuid = test_async!(engine.create_pool(&pool_name, paths, None))
let pool_uuid = test_async!(engine.create_pool_legacy(&pool_name, paths, None))
.unwrap()
.changed()
.unwrap();
Expand Down
34 changes: 13 additions & 21 deletions src/engine/strat_engine/pool/v2.rs
Original file line number Diff line number Diff line change
Expand Up @@ -50,29 +50,15 @@ use crate::{
/// Precondition: This method is called only when setting up a pool, which
/// ensures that the flex devs metadata lists are all non-empty.
fn next_index(flex_devs: &FlexDevsSave) -> Sectors {
let expect_msg = "Setting up rather than initializing a pool, so each flex dev must have been allocated at least some segments.";
[
flex_devs
.meta_dev
.last()
.unwrap_or_else(|| panic!("{}", expect_msg)),
flex_devs
.thin_meta_dev
.last()
.unwrap_or_else(|| panic!("{}", expect_msg)),
flex_devs
.thin_data_dev
.last()
.unwrap_or_else(|| panic!("{}", expect_msg)),
flex_devs
.thin_meta_dev_spare
.last()
.unwrap_or_else(|| panic!("{}", expect_msg)),
&flex_devs.meta_dev,
&flex_devs.thin_meta_dev,
&flex_devs.thin_data_dev,
&flex_devs.thin_meta_dev_spare,
]
.iter()
.max_by_key(|x| x.0)
.map(|&&(start, length)| start + length)
.expect("iterator is non-empty")
.flat_map(|vec| vec.iter().map(|(_, length)| *length))
.sum()
}

/// Check the metadata of an individual pool for consistency.
Expand All @@ -81,7 +67,13 @@ fn next_index(flex_devs: &FlexDevsSave) -> Sectors {
fn check_metadata(metadata: &PoolSave) -> StratisResult<()> {
let flex_devs = &metadata.flex_devs;
let next = next_index(flex_devs);
let allocated_from_cap = metadata.backstore.cap.allocs[0].1;
let allocated_from_cap = metadata
.backstore
.cap
.allocs
.iter()
.map(|(_, size)| *size)
.sum::<Sectors>();

if allocated_from_cap != next {
let err_msg = format!(
Expand Down
3 changes: 3 additions & 0 deletions src/engine/strat_engine/serde_structs.rs
Original file line number Diff line number Diff line change
Expand Up @@ -78,6 +78,9 @@ pub struct BaseBlockDevSave {
/// Serialized record of allocations on the backstore cap device.
#[derive(Debug, Deserialize, Eq, PartialEq, Serialize)]
pub struct CapSave {
    /// Data allocations as (offset, length) pairs on the cap device.
    pub allocs: Vec<(Sectors, Sectors)>,
    /// Metadata allocations as (offset, length) pairs on the cap device.
    /// Skipped when serializing if empty, and defaults to empty when the
    /// field is absent, so pool-level metadata written before this field
    /// existed still deserializes — NOTE(review): presumably kept optional
    /// for backward compatibility with older on-disk metadata; confirm.
    #[serde(skip_serializing_if = "Vec::is_empty")]
    #[serde(default)]
    pub meta_allocs: Vec<(Sectors, Sectors)>,
}

#[derive(Debug, Deserialize, Eq, PartialEq, Serialize)]
Expand Down
2 changes: 1 addition & 1 deletion src/engine/strat_engine/thinpool/thinpool.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2929,7 +2929,7 @@ mod tests {
.unwrap()
);
assert_eq!(
backstore.datatier_allocated_size(),
backstore.data_alloc_size(),
pool.thin_pool.data_dev().size()
+ pool.thin_pool.meta_dev().size() * 2u64
+ pool.mdv.device().size()
Expand Down

0 comments on commit 9e19496

Please sign in to comment.