storage: a series of fixups for stargz chunks
Signed-off-by: Yan Song <[email protected]>
imeoer committed Jun 21, 2022
1 parent 8f972b3 commit 253bd20
Showing 6 changed files with 68 additions and 11 deletions.

12 changes: 10 additions & 2 deletions rafs/src/metadata/layout/v6.rs

@@ -17,6 +17,7 @@ use lazy_static::lazy_static;
 use nydus_utils::{compress, digest, round_up, ByteSize};
 use storage::device::{BlobFeatures, BlobInfo};
 use storage::meta::{BlobMetaHeaderOndisk, BLOB_FEATURE_4K_ALIGNED};
+use storage::RAFS_MAX_CHUNK_SIZE;
 
 use crate::metadata::{layout::RafsXAttrs, RafsStore, RafsSuperFlags};
 use crate::{impl_bootstrap_converter, impl_pub_getter_setter, RafsIoReader, RafsIoWrite};
@@ -352,7 +353,10 @@ impl RafsV6SuperBlockExt {
         }
 
         let chunk_size = u32::from_le(self.s_chunk_size) as u64;
-        if !chunk_size.is_power_of_two() || chunk_size < EROFS_BLOCK_SIZE {
+        if !chunk_size.is_power_of_two()
+            || chunk_size < EROFS_BLOCK_SIZE
+            || chunk_size > RAFS_MAX_CHUNK_SIZE
+        {
             return Err(einval!("invalid chunk size in Rafs v6 extended superblock"));
         }
 
@@ -1292,7 +1296,11 @@ impl RafsV6Blob {
         }
 
         let c_size = u32::from_le(self.chunk_size) as u64;
-        if c_size.count_ones() != 1 || c_size < EROFS_BLOCK_SIZE || c_size != chunk_size as u64 {
+        if c_size.count_ones() != 1
+            || c_size < EROFS_BLOCK_SIZE
+            || c_size > RAFS_MAX_CHUNK_SIZE
+            || c_size != chunk_size as u64
+        {
             error!(
                 "RafsV6Blob: idx {} invalid c_size {}, count_ones() {}",
                 blob_index,
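
Both hunks above enforce the same tightened invariant: a Rafs v6 chunk size must be a power of two no smaller than the EROFS block size and no larger than the new RAFS_MAX_CHUNK_SIZE cap (note that c_size.count_ones() != 1 is just the negation of is_power_of_two()). A minimal standalone sketch of that invariant, assuming the usual 4 KiB EROFS block size; the helper function itself is hypothetical:

const EROFS_BLOCK_SIZE: u64 = 4096; // assumption: 4 KiB EROFS block size
const RAFS_MAX_CHUNK_SIZE: u64 = 1024 * 1024 * 16; // 16MB, per storage/src/lib.rs below

/// The invariant both hunks enforce: a power of two within
/// [EROFS_BLOCK_SIZE, RAFS_MAX_CHUNK_SIZE].
fn is_valid_chunk_size(chunk_size: u64) -> bool {
    chunk_size.is_power_of_two()
        && chunk_size >= EROFS_BLOCK_SIZE
        && chunk_size <= RAFS_MAX_CHUNK_SIZE
}

fn main() {
    assert!(is_valid_chunk_size(1024 * 1024)); // the 1MB default passes
    assert!(!is_valid_chunk_size(32 * 1024 * 1024)); // above the 16MB cap
    assert!(!is_valid_chunk_size(3 * 1024 * 1024)); // not a power of two
}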

11 changes: 10 additions & 1 deletion src/bin/nydusd/fs_cache.rs

@@ -584,7 +584,16 @@ impl FsCacheHandler {
             }
             Some(obj) => match obj.fetch_range_uncompressed(msg.off, msg.len) {
                 Ok(v) if v == msg.len as usize => {}
-                _ => debug!("fscache: failed to read data from blob object"),
+                Ok(v) => {
+                    warn!(
+                        "fscache: read data from blob object not matched: {} != {}",
+                        v, msg.len
+                    );
+                }
+                Err(e) => error!(
+                    "{}",
+                    format!("fscache: failed to read data from blob object: {}", e,)
+                ),
             },
         }
     }
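
The old catch-all arm logged every mismatch at debug level, so short reads and hard I/O errors were indistinguishable. The new arms separate the two cases. A self-contained sketch of the same pattern (fetch_range here is a hypothetical stand-in for obj.fetch_range_uncompressed(), which returns the number of bytes actually read):

use std::io::Result;

/// Stand-in for fetch_range_uncompressed(): returns bytes actually read.
fn fetch_range(_off: u64, len: u64) -> Result<usize> {
    Ok(len as usize / 2) // simulate a short read
}

fn main() {
    let len: u64 = 4096;
    match fetch_range(0, len) {
        Ok(v) if v == len as usize => {} // full read: nothing to report
        // short read: recoverable, but log both sizes
        Ok(v) => eprintln!("short read from blob object: {} != {}", v, len),
        // hard failure: keep the underlying error in the log
        Err(e) => eprintln!("failed to read from blob object: {}", e),
    }
}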

16 changes: 16 additions & 0 deletions storage/src/cache/cachedfile.rs

@@ -359,6 +359,22 @@ impl BlobObject for FileCacheEntry {
 
 impl FileCacheEntry {
     fn do_fetch_chunks(&self, chunks: &[BlobIoChunk]) -> Result<usize> {
+        if self.is_stargz() {
+            for chunk in chunks {
+                let mut buf = alloc_buf(chunk.uncompress_size() as usize);
+                self.read_raw_chunk(chunk, &mut buf, false, None)?;
+                if self.dio_enabled {
+                    self.adjust_buffer_for_dio(&mut buf)
+                }
+                Self::persist_chunk(&self.file, chunk.uncompress_offset(), &buf)
+                    .map_err(|e| eio!(format!("do_fetch_chunk failed to persist data, {:?}", e)))?;
+                self.chunk_map
+                    .set_ready_and_clear_pending(chunk.as_base())
+                    .unwrap_or_else(|e| error!("set ready failed, {}", e));
+            }
+            return Ok(0);
+        }
+
         debug_assert!(!chunks.is_empty());
         let bitmap = self
             .chunk_map
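
For stargz blobs, do_fetch_chunks() now decompresses each chunk and writes it into the cache file at the chunk's uncompressed offset, marking it ready afterwards, instead of going through the bitmap-based batching below. Under the assumption that persist_chunk() is essentially a positioned write, a sketch of that step (the real helper may also handle O_DIRECT alignment, retries, etc.):

use std::fs::File;
use std::io;
use std::os::unix::fs::FileExt;

/// Write an uncompressed chunk into the cache file at its uncompressed
/// offset, as the stargz path above does via Self::persist_chunk().
fn persist_chunk(file: &File, uncompress_offset: u64, buf: &[u8]) -> io::Result<()> {
    file.write_all_at(buf, uncompress_offset)
}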

5 changes: 1 addition & 4 deletions storage/src/cache/fscache/mod.rs

@@ -166,9 +166,6 @@ impl FileCacheEntry {
         if blob_info.has_feature(BlobFeatures::V5_NO_EXT_BLOB_TABLE) {
             return Err(einval!("fscache does not support Rafs v5 blobs"));
         }
-        if blob_info.is_stargz() {
-            return Err(einval!("fscache does not support stargz blob file"));
-        }
         let file = blob_info
             .get_fscache_file()
             .ok_or_else(|| einval!("No fscache file associated with the blob_info"))?;
@@ -211,7 +208,7 @@ impl FileCacheEntry {
             is_get_blob_object_supported: true,
             is_compressed: false,
             is_direct_chunkmap: true,
-            is_stargz: false,
+            is_stargz: blob_info.is_stargz(),
             dio_enabled: true,
             need_validate: mgr.validate,
             prefetch_config,

4 changes: 2 additions & 2 deletions storage/src/lib.rs

@@ -72,8 +72,8 @@ macro_rules! impl_getter {
 
 /// Default blob chunk size.
 pub const RAFS_DEFAULT_CHUNK_SIZE: u64 = 1024 * 1024;
-/// Maximum blob chunk size.
-pub const RAFS_MAX_CHUNK_SIZE: u64 = 1024 * 1024;
+/// Maximum blob chunk size, 16MB.
+pub const RAFS_MAX_CHUNK_SIZE: u64 = 1024 * 1024 * 16;
 
 /// Error codes related to storage subsystem.
 #[derive(Debug)]
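
With the cap raised to 16MB, RAFS_DEFAULT_CHUNK_SIZE and RAFS_MAX_CHUNK_SIZE are no longer equal, so callers must not treat the default as the maximum. A sketch of compile-time sanity checks one could add (assumes a toolchain where assert! works in const context, Rust 1.57+):

pub const RAFS_DEFAULT_CHUNK_SIZE: u64 = 1024 * 1024; // 1MB
pub const RAFS_MAX_CHUNK_SIZE: u64 = 1024 * 1024 * 16; // 16MB

// The default stays within the maximum, and both remain powers of two,
// which the Rafs v6 chunk size validation above relies on.
const _: () = assert!(RAFS_DEFAULT_CHUNK_SIZE <= RAFS_MAX_CHUNK_SIZE);
const _: () = assert!(RAFS_DEFAULT_CHUNK_SIZE.is_power_of_two());
const _: () = assert!(RAFS_MAX_CHUNK_SIZE.is_power_of_two());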

31 changes: 29 additions & 2 deletions storage/src/meta/mod.rs

@@ -412,6 +412,7 @@ impl BlobMetaInfo {
             chunks: chunk_infos,
             base: base as *const u8,
             unmap_len: expected_size,
+            is_stargz: blob_info.is_stargz(),
         });
 
         Ok(BlobMetaInfo { state })
@@ -470,7 +471,9 @@
             index += 1;
             let entry = &infos[index];
             self.validate_chunk(entry)?;
-            if entry.uncompressed_offset() != last_end {
+
+            // For stargz chunks, disable this check.
+            if !self.state.is_stargz && entry.uncompressed_offset() != last_end {
                 return Err(einval!(format!(
                     "mismatch uncompressed {} size {} last_end {}",
                     entry.uncompressed_offset(),
@@ -567,7 +570,8 @@
 
     #[inline]
     fn validate_chunk(&self, entry: &BlobChunkInfoOndisk) -> Result<()> {
-        if entry.compressed_end() > self.state.compressed_size
+        // For stargz blobs, self.state.compressed_size == 0, so don't validate it.
+        if (!self.state.is_stargz && entry.compressed_end() > self.state.compressed_size)
             || entry.uncompressed_end() > self.state.uncompressed_size
         {
             Err(einval!())
@@ -651,6 +655,8 @@ pub struct BlobMetaState {
     chunks: ManuallyDrop<Vec<BlobChunkInfoOndisk>>,
     base: *const u8,
     unmap_len: usize,
+    /// Whether the blob meta is for a stargz image.
+    is_stargz: bool,
 }
 
 // Safe to Send/Sync because the underlying data structures are readonly
@@ -676,6 +682,25 @@ impl BlobMetaState {
         let mut start = 0;
         let mut end = 0;
 
+        if self.is_stargz {
+            // FIXME: stargz chunks are not currently assigned chunk indexes in
+            // uncompressed_offset order, so a binary search is not possible yet.
+            // This linear scan is a high-overhead workaround that needs fixing.
+            for i in 0..self.chunk_count {
+                let off = if compressed {
+                    chunks[i as usize].compressed_offset()
+                } else {
+                    chunks[i as usize].uncompressed_offset()
+                };
+                if addr == off {
+                    return Ok(i as usize);
+                }
+            }
+            return Err(einval!(format!(
+                "can't find stargz chunk by offset {}",
+                addr,
+            )));
+        }
+
         while left < right {
             let mid = left + size / 2;
             // SAFETY: the call is made safe by the following invariants:
@@ -804,6 +829,7 @@ mod tests {
             ]),
             base: std::ptr::null(),
             unmap_len: 0,
+            is_stargz: false,
         };
 
         assert_eq!(state.get_chunk_index_nocheck(0, false).unwrap(), 0);
@@ -888,6 +914,7 @@
             ]),
             base: std::ptr::null(),
             unmap_len: 0,
+            is_stargz: false,
         };
         let info = BlobMetaInfo {
             state: Arc::new(state),
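
The FIXME in the lookup hunk explains why the stargz path falls back to an O(n) scan: stargz chunk indexes are not assigned in uncompressed_offset order, so the existing binary search over the raw chunk array cannot be used. One possible shape of the fix, sketched here with hypothetical names (not part of this commit): build a permutation sorted by offset once, then binary search that.

/// Hypothetical index: a permutation of chunk indexes sorted by offset,
/// restoring O(log n) lookups for out-of-order stargz chunks.
struct SortedChunkIndex {
    by_offset: Vec<(u64, usize)>, // (offset, original chunk index)
}

impl SortedChunkIndex {
    fn new(offsets: &[u64]) -> Self {
        let mut by_offset: Vec<(u64, usize)> = offsets
            .iter()
            .enumerate()
            .map(|(i, &off)| (off, i))
            .collect();
        by_offset.sort_unstable_by_key(|&(off, _)| off);
        SortedChunkIndex { by_offset }
    }

    /// Replacement for the linear probe: exact-match lookup by offset.
    fn find(&self, addr: u64) -> Option<usize> {
        self.by_offset
            .binary_search_by_key(&addr, |&(off, _)| off)
            .ok()
            .map(|pos| self.by_offset[pos].1)
    }
}

fn main() {
    // Offsets deliberately not monotonic in chunk index order.
    let idx = SortedChunkIndex::new(&[8192, 0, 4096]);
    assert_eq!(idx.find(4096), Some(2));
    assert_eq!(idx.find(1), None);
}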
