storage: a series fixups for stargz chunks
Signed-off-by: Yan Song <[email protected]>
imeoer committed Jun 18, 2022
1 parent d063ecb commit 2281c2d
Showing 4 changed files with 55 additions and 75 deletions.
11 changes: 10 additions & 1 deletion src/bin/nydusd/fs_cache.rs
@@ -584,7 +584,16 @@ impl FsCacheHandler {
}
Some(obj) => match obj.fetch_range_uncompressed(msg.off, msg.len) {
Ok(v) if v == msg.len as usize => {}
_ => debug!("fscache: failed to read data from blob object"),
Ok(v) => {
warn!(
"fscache: read data from blob object not matched: {} != {}",
v, msg.len
);
}
Err(e) => error!(
"{}",
format!("fscache: failed to read data from blob object: {}", e,)
),
},
}
}
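The new match arms distinguish a short read from an I/O error instead of folding both into a single debug message. A minimal standalone sketch of that reporting pattern, with eprintln! standing in for the crate's log macros and an illustrative function name:

use std::io::Result;

// Report the outcome of an uncompressed range fetch: silence on success,
// a mismatch warning on a short read, and an error message on failure.
fn report_fetch_result(res: Result<usize>, requested: u64) {
    match res {
        Ok(v) if v == requested as usize => {}
        Ok(v) => eprintln!(
            "fscache: read data from blob object not matched: {} != {}",
            v, requested
        ),
        Err(e) => eprintln!("fscache: failed to read data from blob object: {}", e),
    }
}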
16 changes: 16 additions & 0 deletions storage/src/cache/cachedfile.rs
@@ -359,6 +359,22 @@ impl BlobObject for FileCacheEntry {

impl FileCacheEntry {
fn do_fetch_chunks(&self, chunks: &[BlobIoChunk]) -> Result<usize> {
if self.is_stargz() {
for chunk in chunks {
let mut buf = alloc_buf(chunk.uncompress_size() as usize);
self.read_raw_chunk(chunk, &mut buf, false, None)?;
if self.dio_enabled {
self.adjust_buffer_for_dio(&mut buf)
}
Self::persist_chunk(&self.file, chunk.uncompress_offset(), &buf)
.map_err(|e| eio!(format!("do_fetch_chunk failed to persist data, {:?}", e)))?;
self.chunk_map
.set_ready_and_clear_pending(chunk.as_base())
.unwrap_or_else(|e| error!("set ready failed, {}", e));
}
return Ok(0);
}

debug_assert!(!chunks.is_empty());
let bitmap = self
.chunk_map
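For stargz blobs, do_fetch_chunks now takes an early-return path instead of the bitmap-based batching that follows: each chunk is read through read_raw_chunk into a buffer of its uncompressed size, persisted into the cache file at its uncompressed offset, and then marked ready in the chunk map. A simplified, self-contained sketch of that control flow, using hypothetical placeholder types and callbacks rather than the crate's real traits (the direct-I/O buffer adjustment and error mapping are omitted):

use std::fs::File;
use std::io::{Result, Seek, SeekFrom, Write};

// Placeholder chunk descriptor; the real code works with BlobIoChunk.
struct Chunk {
    uncompressed_offset: u64,
    uncompressed_size: u32,
}

// Mirror of the stargz path: read each chunk uncompressed, write it back at
// its uncompressed offset, then mark it ready.
fn fetch_stargz_chunks(
    file: &mut File,
    chunks: &[Chunk],
    mut read_uncompressed: impl FnMut(&Chunk, &mut [u8]) -> Result<()>,
    mut mark_ready: impl FnMut(&Chunk),
) -> Result<usize> {
    for chunk in chunks {
        let mut buf = vec![0u8; chunk.uncompressed_size as usize];
        read_uncompressed(chunk, &mut buf)?;
        file.seek(SeekFrom::Start(chunk.uncompressed_offset))?;
        file.write_all(&buf)?;
        mark_ready(chunk);
    }
    Ok(0)
}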
5 changes: 1 addition & 4 deletions storage/src/cache/fscache/mod.rs
@@ -166,9 +166,6 @@ impl FileCacheEntry {
if blob_info.has_feature(BlobFeatures::V5_NO_EXT_BLOB_TABLE) {
return Err(einval!("fscache does not support Rafs v5 blobs"));
}
if blob_info.is_stargz() {
return Err(einval!("fscache does not support stargz blob file"));
}
let file = blob_info
.get_fscache_file()
.ok_or_else(|| einval!("No fscache file associated with the blob_info"))?;
@@ -211,7 +208,7 @@ impl FileCacheEntry {
is_get_blob_object_supported: true,
is_compressed: false,
is_direct_chunkmap: true,
is_stargz: false,
is_stargz: blob_info.is_stargz(),
dio_enabled: true,
need_validate: mgr.validate,
prefetch_config,
98 changes: 28 additions & 70 deletions storage/src/meta/mod.rs
@@ -471,14 +471,16 @@ impl BlobMetaInfo {
index += 1;
let entry = &infos[index];
self.validate_chunk(entry)?;
if entry.uncompressed_offset() != last_end {
return Err(einval!(format!(
"mismatch uncompressed {} size {} last_end {}",
entry.uncompressed_offset(),
entry.uncompressed_size(),
last_end
)));
}

// FIXME: for stargz chunks, disable this check.
// if entry.uncompressed_offset() != last_end {
// return Err(einval!(format!(
// "mismatch uncompressed {} size {} last_end {}",
// entry.uncompressed_offset(),
// entry.uncompressed_size(),
// last_end
// )));
// }

// Avoid read amplify if next chunk is too big.
if last_end >= end && entry.aligned_uncompressed_end() > batch_end {
@@ -568,9 +570,9 @@ impl BlobMetaInfo {

#[inline]
fn validate_chunk(&self, entry: &BlobChunkInfoOndisk) -> Result<()> {
if entry.compressed_end() > self.state.compressed_size
|| entry.uncompressed_end() > self.state.uncompressed_size
{
// For stargz blob, self.state.compressed_size == 0, so don't validate
// entry.compressed_end() > self.state.compressed_size.
if entry.uncompressed_end() > self.state.uncompressed_size {
Err(einval!())
} else {
Ok(())
@@ -677,6 +679,13 @@ impl BlobMetaState {
let mut start = 0;
let mut end = 0;

// FIXME: it's a workaround for stargz chunks.
for i in 0..self.chunk_count {
if addr == chunks[i as usize].uncompressed_offset() {
return Ok(i as usize);
}
}

while left < right {
let mid = left + size / 2;
// SAFETY: the call is made safe by the following invariants:
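The loop added above tries an exact match on uncompressed_offset across all chunks before falling back to the existing binary search; stargz chunk layouts may not satisfy the assumptions the binary search relies on (earlier in this commit the uncompressed-offset continuity check is likewise disabled for stargz). A rough sketch of that lookup order over a plain offset table, hypothetical and simplified:

// Workaround-style lookup: exact linear match first, binary search as fallback.
fn find_chunk_index(offsets: &[u64], addr: u64) -> Option<usize> {
    // Exact match on a chunk's uncompressed offset.
    if let Some(i) = offsets.iter().position(|&off| off == addr) {
        return Some(i);
    }
    // Fallback: binary search for a containing chunk (the real code checks the
    // chunk's start/end range; this sketch only picks the nearest preceding offset).
    match offsets.binary_search(&addr) {
        Ok(i) => Some(i),
        Err(0) => None,
        Err(i) => Some(i - 1),
    }
}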
@@ -827,78 +836,27 @@ mod tests {
assert_eq!(chunk.uncompressed_size(), 1);
assert_eq!(chunk.aligned_uncompressed_end(), 0x1000);

// chunk.set_compressed_offset(0x1000);
// chunk.set_compressed_size(0x100);
// assert_eq!(chunk.compressed_offset(), 0x1000);
// assert_eq!(chunk.compressed_size(), 0x100);
// assert_eq!(chunk.compressed_end(), 0x1100);
// chunk.set_uncompressed_offset(0x2000);
// chunk.set_uncompressed_size(0x100);
// assert_eq!(chunk.uncompressed_offset(), 0x2000);
// assert_eq!(chunk.uncompressed_size(), 0x100);
// assert_eq!(chunk.uncompressed_end(), 0x2100);
// assert_eq!(chunk.aligned_uncompressed_end(), 0x3000);
// assert!(!chunk.is_compressed());

// chunk.set_uncompressed_size(0x200);
// assert_eq!(chunk.uncompressed_size(), 0x200);
// assert!(chunk.is_compressed());

// chunk.set_uncompressed_size(0x100000);
// assert_eq!(chunk.uncompressed_size(), 0x100000);

// chunk.set_uncompressed_size(0x100000 + 0x100);
// assert_eq!(chunk.uncompressed_size(), 0x100000 + 0x100);

chunk.set_compressed_offset(0x1000);
assert_eq!(chunk.compressed_offset(), 0x1000);

chunk.set_compressed_size(0x100);
assert_eq!(chunk.compressed_size(), 0x100);

chunk.set_compressed_offset(0x1000);
assert_eq!(chunk.compressed_offset(), 0x1000);

chunk.set_compressed_size(0x100);
assert_eq!(chunk.compressed_size(), 0x100);

chunk.set_uncompressed_offset(0x1000);
assert_eq!(chunk.uncompressed_offset(), 0x1000);

chunk.set_uncompressed_size(0x100);
assert_eq!(chunk.uncompressed_size(), 0x100);

chunk.set_uncompressed_offset(0x1000);
assert_eq!(chunk.uncompressed_offset(), 0x1000);

chunk.set_uncompressed_size(0x100);
assert_eq!(chunk.uncompressed_size(), 0x100);

//

chunk.set_compressed_offset(0x100000 + 0x100);
assert_eq!(chunk.compressed_offset(), 0x100000 + 0x100);

chunk.set_compressed_size(0x100000 + 0x100);
assert_eq!(chunk.compressed_size(), 0x100000 + 0x100);

chunk.set_compressed_offset(0x100000 + 0x100);
assert_eq!(chunk.compressed_offset(), 0x100000 + 0x100);

chunk.set_compressed_size(0x100000 + 0x100);
assert_eq!(chunk.compressed_size(), 0x100000 + 0x100);

chunk.set_uncompressed_offset(0x100000 + 0x100);
assert_eq!(chunk.uncompressed_offset(), 0x100000);

chunk.set_uncompressed_size(0x100000 + 0x100);
assert_eq!(chunk.uncompressed_size(), 0x100000 + 0x100);

chunk.set_uncompressed_offset(0x100000 + 0x100);
assert_eq!(chunk.uncompressed_offset(), 0x100000);
chunk.set_compressed_offset(0x1000000);
chunk.set_compressed_size(0x1000000);
assert_eq!(chunk.compressed_offset(), 0x1000000);
assert_eq!(chunk.compressed_size(), 0x1000000);

chunk.set_uncompressed_size(0x100000 + 0x100);
assert_eq!(chunk.uncompressed_size(), 0x100000 + 0x100);
chunk.set_uncompressed_offset(0x1000000);
chunk.set_uncompressed_size(0x1000000);
assert_eq!(chunk.uncompressed_offset(), 0x1000000);
assert_eq!(chunk.uncompressed_size(), 0x1000000);
}

#[test]