diff --git a/src/lib.rs b/src/lib.rs
index 01049af..9d55340 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -4,13 +4,15 @@
 #![feature(generic_const_exprs)] // Eww
 #![allow(incomplete_features)]
 
+use core::fmt::Debug;
+
 use crate::{
     diag::Counters,
     ll::{
-        blocks::{BlockHeaderKind, BlockInfo, BlockOps, BlockType},
+        blocks::{BlockHeaderKind, BlockInfo, BlockOps, BlockType, IndexedBlockInfo},
         objects::{
-            MetadataObjectHeader, ObjectHeader, ObjectIterator, ObjectLocation, ObjectReader,
-            ObjectState, ObjectType, ObjectWriter,
+            MetadataObjectHeader, ObjectHeader, ObjectInfo, ObjectIterator, ObjectLocation,
+            ObjectReader, ObjectState, ObjectType, ObjectWriter,
        },
     },
     medium::{StorageMedium, StoragePrivate},
@@ -61,35 +63,127 @@ where
     M: StorageMedium,
     [(); M::BLOCK_COUNT]:,
 {
-    fn find_alloc_block(&self, ty: BlockType, min_free: usize) -> Result<usize, StorageError> {
-        log::trace!("Storage::find_alloc_block({ty:?}, {min_free})");
-
-        // Try to find a used block with enough free space
-        if let Some(block) = self.blocks.iter().position(|info| {
-            info.header.kind() == BlockHeaderKind::Known(ty)
-                && !info.is_empty()
-                && info.free_space() >= min_free
-        }) {
-            return Ok(block);
-        }
+    async fn allocate_new_object(
+        &mut self,
+        ty: BlockType,
+        min_free: usize,
+        medium: &mut M,
+    ) -> Result<ObjectLocation, StorageError> {
+        self.allocate_object(ty, min_free, false, medium).await
+    }
 
-        // Pick a free block. Prioritize lesser used blocks.
-        if let Some((block, _)) = self
-            .blocks
+    fn all_blocks(&self) -> impl Iterator<Item = IndexedBlockInfo<M>> + '_ {
+        self.blocks
             .iter()
+            .copied()
             .enumerate()
-            .filter(|(_, info)| {
-                info.header.kind() == BlockHeaderKind::Known(BlockType::Undefined)
-                    && info.free_space() >= min_free
-            })
-            .min_by_key(|(_, info)| info.header.erase_count())
+            .map(|(idx, info)| IndexedBlockInfo(idx, info))
+    }
+
+    fn blocks(&self, ty: BlockType) -> impl Iterator<Item = IndexedBlockInfo<M>> + '_ {
+        self.all_blocks().filter(move |info| info.is_type(ty))
+    }
+
+    fn allocate_object_impl(
+        &self,
+        ty: BlockType,
+        min_free: usize,
+        allow_gc_block: bool,
+    ) -> Result<usize, StorageError> {
+        log::trace!("Storage::allocate_object({ty:?}, {min_free}, {allow_gc_block:?})");
+
+        // Try to find a used block with enough free space
+        if let Some(block) = self
+            .blocks(ty)
+            .find(|info| !info.is_empty() && info.free_space() >= min_free)
         {
-            return Ok(block);
+            return Ok(block.0);
+        }
+        if let Some(block) = self.blocks(ty).find(|info| info.free_space() >= min_free) {
+            return Ok(block.0);
+        }
+
+        // We reserve 2 blocks for GC.
+        if allow_gc_block || self.blocks(BlockType::Undefined).count() > 2 {
+            // Pick a free block. Prioritize lesser used blocks.
+            if let Some(block) = self
+                .blocks(BlockType::Undefined)
+                .filter(|info| info.free_space() >= min_free)
+                .min_by_key(|info| info.erase_count())
+            {
+                return Ok(block.0);
+            }
         }
 
         // No block found
         Err(StorageError::InsufficientSpace)
     }
+
+    async fn allocate_object(
+        &mut self,
+        ty: BlockType,
+        min_free: usize,
+        allow_gc_block: bool,
+        medium: &mut M,
+    ) -> Result<ObjectLocation, StorageError> {
+        let location = self
+            .allocate_object_impl(ty, min_free, allow_gc_block)
+            .map(|block| ObjectLocation {
+                block,
+                offset: self.blocks[block].used_bytes(),
+            })?;
+
+        if self.blocks[location.block].is_unassigned() {
+            log::debug!("Setting block {} to {ty:?}", location.block);
+            BlockOps::new(medium)
+                .set_block_type(location.block, ty)
+                .await?;
+            self.blocks[location.block].header.set_block_type(ty);
+        }
+
+        Ok(location)
+    }
+
+    pub(crate) async fn find_block_to_free(
+        &mut self,
+        medium: &mut M,
+    ) -> Result<Option<(IndexedBlockInfo<M>, usize)>, StorageError> {
+        let mut target_block = None::<(IndexedBlockInfo<M>, usize)>;
+
+        // Select block with enough freeable space and minimum erase counter
+        for info in self.all_blocks().filter(|block| !block.is_empty()) {
+            let freeable = info.calculate_freeable_space(medium).await?;
+
+            match target_block {
+                Some((idx, potential)) => {
+                    if freeable > potential
+                        || (potential == freeable && info.erase_count() < idx.erase_count())
+                    {
+                        target_block = Some((info, freeable));
+                    }
+                }
+
+                None => target_block = Some((info, freeable)),
+            }
+        }
+
+        Ok(target_block)
+    }
+
+    async fn format(&mut self, block_to_free: usize, medium: &mut M) -> Result<(), StorageError> {
+        BlockOps::new(medium).format_block(block_to_free).await?;
+        self.blocks[block_to_free].update_stats_after_erase();
+
+        Ok(())
+    }
+
+    async fn format_indexed(
+        &mut self,
+        block_to_free: IndexedBlockInfo<M>,
+        medium: &mut M,
+    ) -> Result<(), StorageError> {
+        self.format(block_to_free.0, medium).await
+    }
 }
 
 /// A mounted storage partition.
@@ -270,7 +364,7 @@ where
         let mut used_bytes = 0;
 
         for (block_idx, info) in self.blocks.blocks.iter().enumerate() {
-            match info.header.kind() {
+            match info.kind() {
                 BlockHeaderKind::Empty => {}
                 BlockHeaderKind::Known(BlockType::Undefined) | BlockHeaderKind::Unknown => {
                     used_bytes += info.used_bytes();
@@ -309,6 +403,9 @@ where
         if_exists: OnCollision,
     ) -> Result<(), StorageError> {
         log::debug!("Storage::store({path}, len = {})", data.len());
+
+        self.make_space_for(path.len(), data.len()).await?;
+
         let overwritten_location = self.lookup(path).await;
 
         let overwritten = match overwritten_location {
@@ -318,6 +415,10 @@ where
         };
 
         if overwritten.is_none() || if_exists == OnCollision::Overwrite {
+            if let Some(overwritten) = overwritten {
+                log::debug!("Overwriting location: {:?}", overwritten);
+            }
+
             self.create_new_file(path, data).await?;
 
             if let Some(location) = overwritten {
@@ -326,6 +427,7 @@ where
 
             Ok(())
         } else {
+            log::debug!("File already exists at path: {}", path);
             Err(StorageError::InvalidOperation)
         }
     }
@@ -366,17 +468,69 @@ where
         Ok(size)
     }
 
+    fn estimate_data_chunks(&self, mut len: usize) -> Result<usize, StorageError> {
+        let mut block_count = 0;
+
+        for (ty, skip) in [(BlockType::Data, 0), (BlockType::Undefined, 2)] {
+            for block in self.blocks.blocks(ty).skip(skip) {
+                let space = block.free_space();
+                if space > ObjectHeader::byte_count::<M>() {
+                    len = len.saturating_sub(space - ObjectHeader::byte_count::<M>());
+                    block_count += 1;
+
+                    if len == 0 {
+                        return Ok(block_count);
+                    }
+                }
+            }
+        }
+
+        Err(StorageError::InsufficientSpace)
+    }
+
+    async fn make_space_for(&mut self, path_len: usize, len: usize) -> Result<(), StorageError> {
+        let mut meta_allocated = false;
+        loop {
+            let blocks = match self.estimate_data_chunks(
+                M::align(len) + M::align(path_len) + M::align(ObjectHeader::byte_count::<M>()),
+            ) {
+                Ok(blocks) => blocks,
+                Err(StorageError::InsufficientSpace) => {
+                    DataObject.try_to_make_space(self).await?;
+                    continue;
+                }
+                Err(e) => return Err(e),
+            };
+
+            if meta_allocated {
+                // Hopefully, freeing space didn't free the metadata block. If it did, we'll
+                // exit with insufficient space error later.
+                break;
+            }
+
+            let meta_size =
+                ObjectHeader::byte_count::<M>() + 4 + (blocks + 1) * M::object_location_bytes();
+
+            match self
+                .blocks
+                .allocate_new_object(BlockType::Metadata, meta_size, &mut self.medium)
+                .await
+            {
+                Ok(_) => meta_allocated = true,
+                Err(StorageError::InsufficientSpace) => MetaObject.try_to_make_space(self).await?,
+                Err(e) => return Err(e),
+            }
+        }
+
+        log::trace!("Storage::make_space_for({len}) done");
+        Ok(())
+    }
+
     async fn lookup(&mut self, path: &str) -> Result<ObjectLocation, StorageError> {
         let path_hash = hash_path(path);
 
-        for block_idx in self
-            .blocks
-            .blocks
-            .iter()
-            .enumerate()
-            .filter_map(|(idx, blk)| blk.is_metadata().then_some(idx))
-        {
-            let mut iter = ObjectIterator::new::<M>(block_idx);
+        for block in self.blocks.blocks(BlockType::Metadata) {
+            let mut iter = block.objects();
 
             'objs: while let Some(object) = iter.next(&mut self.medium).await? {
                 if object.state() != ObjectState::Finalized {
@@ -423,16 +577,21 @@ where
     async fn delete_file_at(&mut self, meta_location: ObjectLocation) -> Result<(), StorageError> {
         let mut metadata = meta_location.read_metadata(&mut self.medium).await?;
 
-        metadata
-            .object
-            .update_state(&mut self.medium, ObjectState::Deleted)
-            .await?;
+        debug_assert_ne!(metadata.object.state(), ObjectState::Free);
+
+        if let Some(filename_object) =
+            ObjectInfo::read(metadata.filename_location, &mut self.medium).await?
+        {
+            filename_object.delete(&mut self.medium).await?;
+        }
 
         while let Some(location) = metadata.next_object_location(&mut self.medium).await? {
             let mut header = ObjectHeader::read(location, &mut self.medium).await?;
-            header
-                .update_state(&mut self.medium, ObjectState::Deleted)
-                .await?;
+            if header.state() != ObjectState::Free {
+                header
+                    .update_state(&mut self.medium, ObjectState::Deleted)
+                    .await?;
+            }
         }
 
         metadata
@@ -466,20 +625,23 @@ where
     }
 
     async fn create_new_file(&mut self, path: &str, mut data: &[u8]) -> Result<(), StorageError> {
+        log::trace!("Storage::create_new_file({:?})", path);
+
         if path.contains(&['/', '\\'][..]) {
+            log::warn!("Path contains invalid characters");
             return Err(StorageError::InvalidOperation);
         }
 
         let path_hash = hash_path(path);
 
-        // filename + 1 data page
-        let est_page_count = 1 + 1; // TODO: guess the number of data pages needed
+        // filename + data objects
+        let est_page_count = 1 + self.estimate_data_chunks(data.len())?;
 
-        // this is mutable because we can fail mid-writing
+        // this is mutable because we can fail mid-writing. 4 bytes to store the path hash
         let mut file_meta_location = self
             .find_new_object_location(
                 BlockType::Metadata,
-                est_page_count * M::align(M::object_location_bytes()),
+                4 + est_page_count * M::object_location_bytes(),
             )
             .await?;
 
@@ -576,6 +738,37 @@ where
         Ok(())
     }
 
+    async fn find_metadata_of_object(
+        &mut self,
+        object: &ObjectInfo<M>,
+    ) -> Result<ObjectInfo<M>, StorageError> {
+        log::trace!("Storage::find_metadata_of_object({:?})", object.location());
+        for block in self.blocks.blocks(BlockType::Metadata) {
+            let mut objects = block.objects();
+            while let Some(meta_object) = objects.next(&mut self.medium).await? {
+                match meta_object.state() {
+                    ObjectState::Free => break,
+                    ObjectState::Allocated => break,
+                    ObjectState::Finalized => {}
+                    ObjectState::Deleted => continue,
+                }
+                let mut meta = meta_object.read_metadata(&mut self.medium).await?;
+                while let Some(loc) = meta.next_object_location(&mut self.medium).await? {
+                    if loc == object.location() {
+                        log::trace!(
+                            "Storage::find_metadata_of_object({:?}) -> {:?}",
+                            object.location(),
+                            meta_object.location()
+                        );
+                        return Ok(meta_object);
+                    }
+                }
+            }
+        }
+
+        Err(StorageError::NotFound)
+    }
+
     async fn find_new_object_location(
         &mut self,
         ty: BlockType,
@@ -584,25 +777,206 @@ where
         log::trace!("Storage::find_new_object_location({ty:?}, {len})");
 
         // find block with most free space
-        let block = self
+        let object_size = M::align(ObjectHeader::byte_count::<M>()) + len;
+        let location = self
             .blocks
-            .find_alloc_block(ty, M::align(ObjectHeader::byte_count::<M>()) + len)?;
+            .allocate_new_object(ty, object_size, &mut self.medium)
+            .await?;
 
-        if self.blocks.blocks[block].header.kind() == BlockHeaderKind::Known(BlockType::Undefined) {
-            BlockOps::new(&mut self.medium)
-                .set_block_type(block, ty)
-                .await?;
-            self.blocks.blocks[block].header.set_block_type(ty);
+        log::trace!("Storage::find_new_object_location({ty:?}, {len}) -> {location:?}");
+
+        Ok(location)
+    }
+}
+
+// Async functions can't be recursive. Splitting out implementation for each block type means
+// we can reuse code without recursion.
+trait ObjectMover: Debug {
+    const BLOCK_TYPE: BlockType;
+
+    async fn move_object<M>(
+        &mut self,
+        storage: &mut Storage<M>,
+        object: ObjectInfo<M>,
+        destination: ObjectLocation,
+    ) -> Result<ObjectInfo<M>, StorageError>
+    where
+        M: StorageMedium,
+        [(); M::BLOCK_COUNT]:;
+
+    async fn try_to_make_space<M>(&mut self, storage: &mut Storage<M>) -> Result<(), StorageError>
+    where
+        M: StorageMedium,
+        [(); M::BLOCK_COUNT]:,
+    {
+        log::debug!("{self:?}::try_to_make_space()");
+        let Some((block_to_free, freeable)) = storage.blocks
+            .find_block_to_free(&mut storage.medium)
+            .await?
+        else {
+            log::debug!("Could not find a block to free");
+            return Err(StorageError::InsufficientSpace);
+        };
+
+        if freeable != block_to_free.used_bytes() {
+            log::debug!("{self:?}::try_to_make_space(): Moving objects out of block to free");
+            // We need to move objects out of this block
+            let mut iter = block_to_free.objects();
+
+            while let Some(object) = iter.next(&mut storage.medium).await? {
+                match object.state() {
+                    ObjectState::Free | ObjectState::Deleted => continue,
+                    ObjectState::Allocated => {
+                        log::warn!("Encountered an allocated object");
+                        // TODO: retry in a different object
+                        return Err(StorageError::InsufficientSpace);
+                    }
+                    ObjectState::Finalized => {}
+                }
+
+                let copy_location = storage
+                    .blocks
+                    .allocate_object(
+                        Self::BLOCK_TYPE,
+                        object.total_size(),
+                        true,
+                        &mut storage.medium,
+                    )
+                    .await
+                    .map_err(|_| StorageError::InsufficientSpace)?;
+
+                self.move_object(storage, object, copy_location).await?;
+            }
         }
+
+        storage
+            .blocks
+            .format_indexed(block_to_free, &mut storage.medium)
+            .await
+    }
+}
+
+#[derive(Debug)]
+struct DataObject;
+
+impl ObjectMover for DataObject {
+    const BLOCK_TYPE: BlockType = BlockType::Data;
+
+    async fn move_object<M>(
+        &mut self,
+        storage: &mut Storage<M>,
+        object: ObjectInfo<M>,
+        destination: ObjectLocation,
+    ) -> Result<ObjectInfo<M>, StorageError>
+    where
+        M: StorageMedium,
+        [(); M::BLOCK_COUNT]:,
+    {
+        log::trace!("{self:?}::move_object");
+
+        let mut meta = storage.find_metadata_of_object(&object).await?;
+        let new_meta_location = match storage
+            .find_new_object_location(BlockType::Metadata, meta.total_size())
+            .await
+        {
+            Ok(loc) => loc,
+            Err(StorageError::InsufficientSpace) => {
+                MetaObject.try_to_make_space(storage).await?;
+                let new = storage
+                    .find_new_object_location(BlockType::Metadata, meta.total_size())
+                    .await?;
+                // Look up again in case it was moved
+                meta = storage.find_metadata_of_object(&object).await?;
+                new
+            }
+            Err(e) => return Err(e),
         };
 
-        log::trace!("Storage::find_new_object_location({ty:?}, {len}) -> {location:?}");
+        log::debug!(
+            "Moving data object {:?} to {destination:?}",
+            object.location()
+        );
+        log::debug!(
+            "Moving meta object {:?} to {new_meta_location:?}",
+            meta.location()
+        );
 
-        Ok(location)
+        // copy metadata object while replacing current object location to new location
+        let mut meta_writer = ObjectWriter::allocate(
+            new_meta_location,
+            ObjectType::FileMetadata,
+            &mut storage.medium,
+        )
+        .await?;
+        let mut old_object_reader = meta.read_metadata(&mut storage.medium).await?;
+
+        // copy header
+        meta_writer
+            .write(
+                &mut storage.medium,
+                &old_object_reader.path_hash.to_le_bytes(),
+            )
+            .await?;
+        let (bytes, byte_count) = old_object_reader.filename_location.into_bytes::<M>();
+        meta_writer
+            .write(&mut storage.medium, &bytes[..byte_count])
+            .await?;
+
+        // copy object locations
+        while let Some(loc) = old_object_reader
+            .next_object_location(&mut storage.medium)
+            .await?
+        {
+            let location = if loc == object.location() {
+                destination
+            } else {
+                loc
+            };
+
+            let (bytes, byte_count) = location.into_bytes::<M>();
+            meta_writer
+                .write(&mut storage.medium, &bytes[..byte_count])
+                .await?;
+        }
+
+        // copy data object
+        let copied = object.copy_object(&mut storage.medium, destination).await?;
+        storage.blocks.blocks[copied.location().block].add_used_bytes(copied.total_size());
+
+        // finalize metadata object
+        let meta_info = meta_writer.finalize(&mut storage.medium).await?;
+        storage.blocks.blocks[meta_info.location().block].add_used_bytes(meta_info.total_size());
+
+        // delete old metadata object
+        meta.delete(&mut storage.medium).await?;
+        // delete old object
+        object.delete(&mut storage.medium).await?;
+
+        Ok(copied)
+    }
+}
+
+#[derive(Debug)]
+struct MetaObject;
+
+impl ObjectMover for MetaObject {
+    const BLOCK_TYPE: BlockType = BlockType::Metadata;
+
+    async fn move_object<M>(
+        &mut self,
+        storage: &mut Storage<M>,
+        object: ObjectInfo<M>,
+        destination: ObjectLocation,
+    ) -> Result<ObjectInfo<M>, StorageError>
+    where
+        M: StorageMedium,
+        [(); M::BLOCK_COUNT]:,
+    {
+        log::trace!("{self:?}::move_object");
+        let info = object.move_object(&mut storage.medium, destination).await?;
+        storage.blocks.blocks[destination.block].add_used_bytes(info.total_size());
+
+        Ok(info)
     }
 }
 
@@ -638,7 +1012,7 @@ mod test {
         ram_nor_emulating::NorRamStorage,
     };
 
-    const LIPSUM: &[u8] = b"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Fusce in mi scelerisque, porttitor mi amet.";
+    const LIPSUM: &[u8] = b"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Fusce i";
 
     pub fn init_test() {
         _ = simple_logger::SimpleLogger::new()
@@ -837,15 +1211,15 @@ mod test {
         let mut reader = storage.read("foo").await.expect("Failed to open file");
 
-        let mut buf = [0u8; 100];
+        let mut buf = [0u8; 64];
 
         // Read in two chunks to test that the reader resumes with the current byte
         reader
-            .read(&mut storage, &mut buf[0..50])
+            .read(&mut storage, &mut buf[0..32])
             .await
             .expect("Failed to read file");
 
         reader
-            .read(&mut storage, &mut buf[50..])
+            .read(&mut storage, &mut buf[32..])
            .await
            .expect("Failed to read file");
 
@@ -888,6 +1262,19 @@ mod test {
         assert_file_contents(&mut storage, "foo", b"bar").await;
         assert_file_contents(&mut storage, "baz", b"asdf").await;
     }
+
+    async fn can_reuse_space_of_deleted_files(
+        mut storage: Storage,
+    ) {
+        for _ in 0..50 {
+            storage
+                .store("foo", LIPSUM, OnCollision::Overwrite)
+                .await
+                .expect("Failed to create");
+
+            storage.delete("foo").await.expect("Failed to delete");
+        }
+    }
 }
 
 #[async_std::test]
@@ -908,7 +1295,7 @@ mod test {
             .store("bar", LIPSUM, OnCollision::Overwrite)
             .await
            .is_err(),
-            "Lookup returned Ok unexpectedly"
+            "Store returned Ok unexpectedly"
        );
    }
 }
diff --git a/src/ll/blocks.rs b/src/ll/blocks.rs
index 5ce5ce8..6dd94fa 100644
--- a/src/ll/blocks.rs
+++ b/src/ll/blocks.rs
@@ -1,4 +1,7 @@
-use core::marker::PhantomData;
+use core::{
+    marker::PhantomData,
+    ops::{Deref, DerefMut},
+};
 
 use crate::{
     ll::objects::{ObjectIterator, ObjectState},
@@ -238,6 +241,7 @@ impl BlockInfo {
     pub fn update_stats_after_erase(&mut self) {
         self.header.erase_count += 1;
+        self.header.header = BlockHeaderKind::Known(BlockType::Undefined);
         self.used_bytes = BlockHeader::<M>::byte_count();
         self.allow_alloc = true;
     }
@@ -261,6 +265,70 @@ impl BlockInfo {
     pub fn used_bytes(&self) -> usize {
         self.used_bytes
     }
+
+    pub fn kind(&self) -> BlockHeaderKind {
+        self.header.kind()
+    }
+
+    pub fn is_type(&self, ty: BlockType) -> bool {
+        self.header.kind() == BlockHeaderKind::Known(ty)
+    }
+
+    pub fn is_unassigned(&self) -> bool {
+        self.is_type(BlockType::Undefined)
+    }
+
+    pub fn erase_count(&self) -> u32 {
+        self.header.erase_count
+    }
+}
+
+pub struct IndexedBlockInfo<M: StorageMedium>(pub usize, pub BlockInfo<M>);
+
+impl<M: StorageMedium> Copy for IndexedBlockInfo<M> {}
+impl<M: StorageMedium> Clone for IndexedBlockInfo<M> {
+    fn clone(&self) -> Self {
+        Self(self.0, self.1)
+    }
+}
+
+impl<M: StorageMedium> IndexedBlockInfo<M> {
+    pub async fn calculate_freeable_space(&self, medium: &mut M) -> Result<usize, StorageError> {
+        let Self(block, _info) = self;
+
+        let mut iter = ObjectIterator::new::<M>(*block);
+
+        let mut deleted = 0;
+
+        while let Some(object) = iter.next(medium).await? {
+            match object.state() {
+                ObjectState::Allocated | ObjectState::Deleted => deleted += object.total_size(),
+                ObjectState::Free | ObjectState::Finalized => {}
+            }
+        }
+
+        let free_space = M::BLOCK_SIZE - iter.current_offset();
+
+        Ok(free_space + deleted)
+    }
+
+    pub fn objects(&self) -> ObjectIterator {
+        ObjectIterator::new::<M>(self.0)
+    }
+}
+
+impl<M: StorageMedium> Deref for IndexedBlockInfo<M> {
+    type Target = BlockInfo<M>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.1
+    }
+}
+
+impl<M: StorageMedium> DerefMut for IndexedBlockInfo<M> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.1
+    }
 }
 
 pub(crate) struct BlockOps<'a, M> {
@@ -328,7 +396,7 @@ impl<'a, M: StorageMedium> BlockOps<'a, M> {
         }
 
         if erase {
-            log::trace!("Erasing block {block}");
+            log::debug!("Erasing block {block}");
             self.medium.erase(block).await?;
         }
 
diff --git a/src/ll/objects.rs b/src/ll/objects.rs
index 51cdbd2..5302911 100644
--- a/src/ll/objects.rs
+++ b/src/ll/objects.rs
@@ -88,18 +88,20 @@ impl CompositeObjectState {
         if current_state > new_state {
             // Can't go backwards in state
+            log::error!("Can't change object state from {current_state:?} to {new_state:?}");
             return Err(StorageError::InvalidOperation);
         }
 
         if let Self::Allocated(ty, _) = self {
             // Can't change allocated object type
             if ty != object_type {
+                log::error!("Can't change object type from {ty:?} to {object_type:?}");
                 return Err(StorageError::InvalidOperation);
             }
         }
 
         let new_data_state = match new_state {
-            ObjectState::Free => return Err(StorageError::InvalidOperation),
+            ObjectState::Free => unreachable!(),
             ObjectState::Allocated => ObjectDataState::Untrusted,
             ObjectState::Finalized => ObjectDataState::Valid,
             ObjectState::Deleted => ObjectDataState::Deleted,
@@ -207,7 +209,10 @@ impl CompositeObjectState {
     fn object_type(self) -> Result<ObjectType, StorageError> {
         match self {
             Self::Allocated(ty, _) => Ok(ty),
-            Self::Free => Err(StorageError::InvalidOperation),
+            Self::Free => {
+                log::error!("Can't read object of type Free");
+                Err(StorageError::InvalidOperation)
+            }
         }
     }
 }
@@ -278,7 +283,7 @@ impl ObjectLocation {
     }
 }
 
-#[derive(Clone, Copy, PartialEq, Eq)]
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
 pub struct ObjectHeader {
     state: CompositeObjectState,
     payload_size: usize, // At most block size - header
@@ -319,7 +324,7 @@ impl ObjectHeader {
         location: ObjectLocation,
         object_type: ObjectType,
     ) -> Result<Self, StorageError> {
-        log::trace!("ObjectHeader::allocate({location:?}, {object_type:?})",);
+        log::debug!("ObjectHeader::allocate({location:?}, {object_type:?})",);
 
         let state = CompositeObjectState::allocate(medium, location, object_type).await?;
 
@@ -433,7 +438,13 @@ impl MetadataObjectHeader {
         log::trace!("MetadataObjectHeader::next_object_location()");
 
         let data_offset = 4 + M::object_location_bytes(); // path hash + filename location
-        if self.data_object_cursor >= self.object.payload_size - data_offset {
+        if self.data_object_cursor
+            >= self
+                .object
+                .payload_size::<M>()
+                .map(|size| size - data_offset)
+                .unwrap_or(0)
+        {
             return Ok(None);
         }
 
@@ -553,6 +564,11 @@ impl ObjectWriter {
         }
 
         if self.space() < data.len() {
+            log::debug!(
+                "Insufficient space ({}) to write data ({})",
+                self.space(),
+                data.len()
+            );
             return Err(StorageError::InsufficientSpace);
         }
 
@@ -603,6 +619,7 @@ impl ObjectWriter {
     pub async fn finalize(mut self, medium: &mut M) -> Result<ObjectInfo<M>, StorageError> {
         if self.object.state() != ObjectState::Allocated {
+            log::error!("Can not finalize object in state {:?}", self.object.state());
             return Err(StorageError::InvalidOperation);
         }
 
@@ -615,6 +632,7 @@ impl ObjectWriter {
     pub async fn delete(mut self, medium: &mut M) -> Result<(), StorageError> {
         if let ObjectState::Free | ObjectState::Deleted = self.object.state() {
+            log::error!("Can not delete object in state {:?}", self.object.state());
             return Ok(());
         }
 
@@ -649,6 +667,7 @@ impl ObjectReader {
             // We can read data from unfinalized/deleted objects if the caller allows it.
         } else {
             // We can only read data from finalized objects.
+            log::error!("Trying to read {:?} object", object.state());
             return Err(StorageError::FsCorrupted);
         }
     }
@@ -751,19 +770,60 @@ impl ObjectInfo {
         })
     }
 
-    async fn read(location: ObjectLocation, medium: &mut M) -> Result<Option<Self>, StorageError> {
+    pub async fn read(
+        location: ObjectLocation,
+        medium: &mut M,
+    ) -> Result<Option<Self>, StorageError> {
         log::trace!("ObjectInfo::read({location:?})");
 
-        if location.offset + BlockHeader::<M>::byte_count() >= M::BLOCK_SIZE {
-            return Ok(None);
-        }
-
         let header = ObjectHeader::read(location, medium).await?;
+        log::trace!("ObjectInfo::read({location:?}) -> {header:?}");
+        if header.state().is_free() {
             return Ok(None);
         }
 
         Ok(Some(Self::with_header(header)))
     }
+
+    pub async fn copy_object(
+        &self,
+        medium: &mut M,
+        dst: ObjectLocation,
+    ) -> Result<ObjectInfo<M>, StorageError> {
+        let mut source = ObjectReader::new(self.location(), medium, false).await?;
+        let mut target = ObjectWriter::allocate(dst, self.header.object_type()?, medium).await?;
+
+        let mut buffer = [0; 16];
+        while source.remaining() > 0 {
+            let read_size = source.read(medium, &mut buffer).await?;
+            target.write(medium, &buffer[0..read_size]).await?;
+        }
+
+        target.finalize(medium).await
+    }
+
+    pub async fn move_object(
+        self,
+        medium: &mut M,
+        dst: ObjectLocation,
+    ) -> Result<ObjectInfo<M>, StorageError> {
+        let new = self.copy_object(medium, dst).await?;
+        self.delete(medium).await?;
+
+        Ok(new)
+    }
+
+    pub async fn finalize(mut self, medium: &mut M) -> Result<Self, StorageError> {
+        self.header
+            .update_state(medium, ObjectState::Finalized)
+            .await?;
+
+        Ok(self)
+    }
+
+    pub async fn delete(mut self, medium: &mut M) -> Result<(), StorageError> {
+        self.header.update_state(medium, ObjectState::Deleted).await
+    }
 }
 
 pub struct ObjectIterator {
@@ -784,10 +844,15 @@ impl ObjectIterator {
         &mut self,
         medium: &mut M,
     ) -> Result<Option<ObjectInfo<M>>, StorageError> {
+        if self.location.offset + ObjectHeader::byte_count::<M>() >= M::BLOCK_SIZE {
+            return Ok(None);
+        }
+
         let info = ObjectInfo::read(self.location, medium).await?;
         if let Some(info) = info.as_ref() {
             self.location.offset += M::align(info.total_size());
         }
+
         Ok(info)
     }