From 02a83745ed01ad8bfa84b7d4b7856dbf7ea39b03 Mon Sep 17 00:00:00 2001 From: Dion Dokter Date: Mon, 18 Mar 2024 15:25:12 +0100 Subject: [PATCH 01/10] Force everyone to use `&mut impl Cache` --- src/cache/mod.rs | 3 +- src/cache/tests.rs | 16 ++-- src/lib.rs | 7 +- src/map.rs | 211 ++++++++++++++++++--------------------------- src/queue.rs | 177 +++++++++++++++++-------------------- 5 files changed, 176 insertions(+), 238 deletions(-) diff --git a/src/cache/mod.rs b/src/cache/mod.rs index 05acf43..e37836a 100644 --- a/src/cache/mod.rs +++ b/src/cache/mod.rs @@ -23,11 +23,10 @@ pub(crate) use page_states::PageStatesCache; /// Trait implemented by all cache types #[allow(private_bounds)] pub trait CacheImpl: PrivateCacheImpl {} -impl CacheImpl for &mut T {} + /// Trait implemented by all cache types that know about keys #[allow(private_bounds)] pub trait KeyCacheImpl: CacheImpl + PrivateKeyCacheImpl {} -impl> KeyCacheImpl for &mut T {} pub(crate) trait Invalidate { fn invalidate_cache_state(&mut self); diff --git a/src/cache/tests.rs b/src/cache/tests.rs index 2f3f35d..cc46a79 100644 --- a/src/cache/tests.rs +++ b/src/cache/tests.rs @@ -56,7 +56,7 @@ mod queue_tests { ); } - async fn run_test(mut cache: impl CacheImpl) -> FlashStatsResult { + async fn run_test(cache: &mut impl CacheImpl) -> FlashStatsResult { let mut flash = mock_flash::MockFlashBase::::new(WriteCountCheck::Twice, None, true); const FLASH_RANGE: Range = 0x00..0x400; @@ -69,11 +69,11 @@ mod queue_tests { let data = vec![i as u8; i % 20 + 1]; println!("PUSH"); - push(&mut flash, FLASH_RANGE, &mut cache, &data, true) + push(&mut flash, FLASH_RANGE, cache, &data, true) .await .unwrap(); assert_eq!( - peek(&mut flash, FLASH_RANGE, &mut cache, &mut data_buffer) + peek(&mut flash, FLASH_RANGE, cache, &mut data_buffer) .await .unwrap() .unwrap(), @@ -82,7 +82,7 @@ mod queue_tests { ); println!("POP"); assert_eq!( - pop(&mut flash, FLASH_RANGE, &mut cache, &mut data_buffer) + pop(&mut flash, FLASH_RANGE, cache, &mut data_buffer) .await .unwrap() .unwrap(), @@ -91,7 +91,7 @@ mod queue_tests { ); println!("PEEK"); assert_eq!( - peek(&mut flash, FLASH_RANGE, &mut cache, &mut data_buffer) + peek(&mut flash, FLASH_RANGE, cache, &mut data_buffer) .await .unwrap(), None, @@ -257,7 +257,7 @@ mod map_tests { } } - async fn run_test(mut cache: impl KeyCacheImpl) -> FlashStatsResult { + async fn run_test(cache: &mut impl KeyCacheImpl) -> FlashStatsResult { let mut flash = mock_flash::MockFlashBase::::new(WriteCountCheck::Twice, None, true); const FLASH_RANGE: Range = 0x00..0x400; @@ -276,7 +276,7 @@ mod map_tests { value: vec![i as u8; LENGHT_PER_KEY[i]], }; - store_item::<_, _>(&mut flash, FLASH_RANGE, &mut cache, &mut data_buffer, &item) + store_item::<_, _>(&mut flash, FLASH_RANGE, cache, &mut data_buffer, &item) .await .unwrap(); } @@ -285,7 +285,7 @@ mod map_tests { let item = fetch_item::( &mut flash, FLASH_RANGE, - &mut cache, + cache, &mut data_buffer, i as u8, ) diff --git a/src/lib.rs b/src/lib.rs index a8cf6f3..1746ce2 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -14,8 +14,6 @@ use core::{ }; use embedded_storage_async::nor_flash::NorFlash; -use crate::cache::NoCache; - pub mod cache; mod item; pub mod map; @@ -50,15 +48,16 @@ impl DerefMut for AlignedBuf { async fn try_general_repair( flash: &mut S, flash_range: Range, + cache: &mut impl PrivateCacheImpl, ) -> Result<(), BasicError> { // Loop through the pages and get their state. If one returns the corrupted error, // the page is likely half-erased. 
Fix for that is to re-erase again to hopefully finish the job. for page_index in get_pages::(flash_range.clone(), 0) { if matches!( - get_page_state(flash, flash_range.clone(), &mut NoCache::new(), page_index).await, + get_page_state(flash, flash_range.clone(), cache, page_index).await, Err(BasicError::Corrupted { .. }) ) { - open_page(flash, flash_range.clone(), &mut NoCache::new(), page_index).await?; + open_page(flash, flash_range.clone(), cache, page_index).await?; } } diff --git a/src/map.rs b/src/map.rs index 2e34622..b11c2c6 100644 --- a/src/map.rs +++ b/src/map.rs @@ -83,7 +83,7 @@ //! fetch_item::( //! &mut flash, //! flash_range.clone(), -//! NoCache::new(), +//! &mut NoCache::new(), //! &mut data_buffer, //! 42, //! ).await.unwrap(), @@ -95,7 +95,7 @@ //! store_item::( //! &mut flash, //! flash_range.clone(), -//! NoCache::new(), +//! &mut NoCache::new(), //! &mut data_buffer, //! &MyCustomType { key: 42, data: 104729 }, //! ).await.unwrap(); @@ -106,7 +106,7 @@ //! fetch_item::( //! &mut flash, //! flash_range.clone(), -//! NoCache::new(), +//! &mut NoCache::new(), //! &mut data_buffer, //! 42, //! ).await.unwrap(), @@ -139,7 +139,7 @@ use super::*; pub async fn fetch_item( flash: &mut S, flash_range: Range, - mut cache: impl KeyCacheImpl, + cache: &mut impl KeyCacheImpl, data_buffer: &mut [u8], search_key: I::Key, ) -> Result, Error> { @@ -152,14 +152,14 @@ pub async fn fetch_item( fetch_item_with_location( flash, flash_range.clone(), - &mut cache, + cache, data_buffer, search_key.clone(), ) .await .map(|value| value.map(|(item, _, _)| item)) }, - repair = try_repair::(flash, flash_range.clone(), &mut cache, data_buffer).await? + repair = try_repair::(flash, flash_range.clone(), cache, data_buffer).await? ) } @@ -316,21 +316,20 @@ async fn fetch_item_with_location( pub async fn store_item( flash: &mut S, flash_range: Range, - mut cache: impl KeyCacheImpl, + cache: &mut impl KeyCacheImpl, data_buffer: &mut [u8], item: &I, ) -> Result<(), Error> { run_with_auto_repair!( - function = - store_item_inner(flash, flash_range.clone(), &mut cache, data_buffer, item).await, - repair = try_repair::(flash, flash_range.clone(), &mut cache, data_buffer).await? + function = store_item_inner(flash, flash_range.clone(), cache, data_buffer, item).await, + repair = try_repair::(flash, flash_range.clone(), cache, data_buffer).await? ) } async fn store_item_inner( flash: &mut S, flash_range: Range, - mut cache: impl KeyCacheImpl, + cache: &mut impl KeyCacheImpl, data_buffer: &mut [u8], item: &I, ) -> Result<(), Error> { @@ -355,20 +354,14 @@ async fn store_item_inner( } // If there is a partial open page, we try to write in that first if there is enough space - let next_page_to_use = if let Some(partial_open_page) = find_first_page( - flash, - flash_range.clone(), - &mut cache, - 0, - PageState::PartialOpen, - ) - .await? + let next_page_to_use = if let Some(partial_open_page) = + find_first_page(flash, flash_range.clone(), cache, 0, PageState::PartialOpen).await? { // We found a partial open page, but at this point it's relatively cheap to do a consistency check if !get_page_state( flash, flash_range.clone(), - &mut cache, + cache, next_page::(flash_range.clone(), partial_open_page), ) .await? 
@@ -398,7 +391,7 @@ async fn store_item_inner( let free_spot_address = find_next_free_item_spot( flash, flash_range.clone(), - &mut cache, + cache, page_data_start_address, page_data_end_address, item_data_length as u32, @@ -411,7 +404,7 @@ async fn store_item_inner( Item::write_new( flash, flash_range.clone(), - &mut cache, + cache, free_spot_address, &data_buffer[..item_data_length], ) @@ -422,7 +415,7 @@ async fn store_item_inner( } None => { // The item doesn't fit here, so we need to close this page and move to the next - close_page(flash, flash_range.clone(), &mut cache, partial_open_page).await?; + close_page(flash, flash_range.clone(), cache, partial_open_page).await?; Some(next_page::(flash_range.clone(), partial_open_page)) } } @@ -438,8 +431,7 @@ async fn store_item_inner( match next_page_to_use { Some(next_page_to_use) => { let next_page_state = - get_page_state(flash, flash_range.clone(), &mut cache, next_page_to_use) - .await?; + get_page_state(flash, flash_range.clone(), cache, next_page_to_use).await?; if !next_page_state.is_open() { // What was the previous buffer page was not open... @@ -452,19 +444,17 @@ async fn store_item_inner( // Since we're gonna write data here, let's already partially close the page // This could be done after moving the data, but this is more robust in the // face of shutdowns and cancellations - partial_close_page(flash, flash_range.clone(), &mut cache, next_page_to_use) - .await?; + partial_close_page(flash, flash_range.clone(), cache, next_page_to_use).await?; let next_buffer_page = next_page::(flash_range.clone(), next_page_to_use); let next_buffer_page_state = - get_page_state(flash, flash_range.clone(), &mut cache, next_buffer_page) - .await?; + get_page_state(flash, flash_range.clone(), cache, next_buffer_page).await?; if !next_buffer_page_state.is_open() { migrate_items::( flash, flash_range.clone(), - &mut cache, + cache, data_buffer, next_buffer_page, next_page_to_use, @@ -474,28 +464,23 @@ async fn store_item_inner( } None => { // There's no partial open page, so we just gotta turn the first open page into a partial open one - let first_open_page = match find_first_page( - flash, - flash_range.clone(), - &mut cache, - 0, - PageState::Open, - ) - .await? - { - Some(first_open_page) => first_open_page, - None => { - // Uh oh, no open pages. - // Something has gone wrong. - // We should never get here. - return Err(Error::Corrupted { - #[cfg(feature = "_test")] - backtrace: std::backtrace::Backtrace::capture(), - }); - } - }; - - partial_close_page(flash, flash_range.clone(), &mut cache, first_open_page).await?; + let first_open_page = + match find_first_page(flash, flash_range.clone(), cache, 0, PageState::Open) + .await? + { + Some(first_open_page) => first_open_page, + None => { + // Uh oh, no open pages. + // Something has gone wrong. + // We should never get here. 
+ return Err(Error::Corrupted { + #[cfg(feature = "_test")] + backtrace: std::backtrace::Backtrace::capture(), + }); + } + }; + + partial_close_page(flash, flash_range.clone(), cache, first_open_page).await?; } } @@ -516,7 +501,7 @@ async fn store_item_inner( pub async fn remove_item( flash: &mut S, flash_range: Range, - mut cache: impl KeyCacheImpl, + cache: &mut impl KeyCacheImpl, data_buffer: &mut [u8], search_key: I::Key, ) -> Result<(), Error> { @@ -524,19 +509,19 @@ pub async fn remove_item( function = remove_item_inner::( flash, flash_range.clone(), - &mut cache, + cache, data_buffer, search_key.clone() ) .await, - repair = try_repair::(flash, flash_range.clone(), &mut cache, data_buffer).await? + repair = try_repair::(flash, flash_range.clone(), cache, data_buffer).await? ) } async fn remove_item_inner( flash: &mut S, flash_range: Range, - mut cache: impl KeyCacheImpl, + cache: &mut impl KeyCacheImpl, data_buffer: &mut [u8], search_key: I::Key, ) -> Result<(), Error> { @@ -545,22 +530,17 @@ async fn remove_item_inner( // Search for the last used page. We're gonna erase from the one after this first. // If we get an early shutoff or cancellation, this will make it so that we don't return // an old version of the key on the next fetch. - let last_used_page = find_first_page( - flash, - flash_range.clone(), - &mut cache, - 0, - PageState::PartialOpen, - ) - .await? - .unwrap_or_default(); + let last_used_page = + find_first_page(flash, flash_range.clone(), cache, 0, PageState::PartialOpen) + .await? + .unwrap_or_default(); // Go through all the pages for page_index in get_pages::( flash_range.clone(), next_page::(flash_range.clone(), last_used_page), ) { - if get_page_state(flash, flash_range.clone(), &mut cache, page_index) + if get_page_state(flash, flash_range.clone(), cache, page_index) .await? .is_open() { @@ -591,7 +571,7 @@ async fn remove_item_inner( // But keep going! We need to erase everything. if item_key == search_key { item.header - .erase_data(flash, flash_range.clone(), &mut cache, item_address) + .erase_data(flash, flash_range.clone(), cache, item_address) .await?; } } @@ -713,58 +693,33 @@ async fn migrate_items( async fn try_repair( flash: &mut S, flash_range: Range, - mut cache: impl KeyCacheImpl, + cache: &mut impl KeyCacheImpl, data_buffer: &mut [u8], ) -> Result<(), Error> { cache.invalidate_cache_state(); - #[allow(dropping_references)] - drop(cache); - crate::try_general_repair(flash, flash_range.clone()).await?; + crate::try_general_repair(flash, flash_range.clone(), cache).await?; // Let's check if we corrupted in the middle of a migration - if let Some(partial_open_page) = find_first_page( - flash, - flash_range.clone(), - &mut cache::NoCache::new(), - 0, - PageState::PartialOpen, - ) - .await? + if let Some(partial_open_page) = + find_first_page(flash, flash_range.clone(), cache, 0, PageState::PartialOpen).await? { let buffer_page = next_page::(flash_range.clone(), partial_open_page); - if !get_page_state( - flash, - flash_range.clone(), - &mut cache::NoCache::new(), - buffer_page, - ) - .await? - .is_open() + if !get_page_state(flash, flash_range.clone(), cache, buffer_page) + .await? + .is_open() { // Yes, the migration got interrupted. Let's redo it. // To do that, we erase the partial open page first because it contains incomplete data. 
- open_page( - flash, - flash_range.clone(), - &mut cache::NoCache::new(), - partial_open_page, - ) - .await?; + open_page(flash, flash_range.clone(), cache, partial_open_page).await?; // Then partially close it again - partial_close_page( - flash, - flash_range.clone(), - &mut cache::NoCache::new(), - partial_open_page, - ) - .await?; + partial_close_page(flash, flash_range.clone(), cache, partial_open_page).await?; migrate_items::( flash, flash_range.clone(), - &mut cache::NoCache::new(), + cache, data_buffer, buffer_page, partial_open_page, @@ -864,7 +819,7 @@ mod tests { let item = fetch_item::( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer, 0, ) @@ -875,7 +830,7 @@ mod tests { let item = fetch_item::( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer, 60, ) @@ -886,7 +841,7 @@ mod tests { let item = fetch_item::( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer, 0xFF, ) @@ -897,7 +852,7 @@ mod tests { store_item::<_, _>( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer, &MockStorageItem { key: 0, @@ -909,7 +864,7 @@ mod tests { store_item::<_, _>( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer, &MockStorageItem { key: 0, @@ -922,7 +877,7 @@ mod tests { let item = fetch_item::( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer, 0, ) @@ -935,7 +890,7 @@ mod tests { store_item::<_, _>( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer, &MockStorageItem { key: 1, @@ -948,7 +903,7 @@ mod tests { let item = fetch_item::( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer, 0, ) @@ -961,7 +916,7 @@ mod tests { let item = fetch_item::( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer, 1, ) @@ -975,7 +930,7 @@ mod tests { store_item::<_, _>( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer, &MockStorageItem { key: (index % 10) as u8, @@ -990,7 +945,7 @@ mod tests { let item = fetch_item::( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer, i, ) @@ -1005,7 +960,7 @@ mod tests { store_item::<_, _>( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer, &MockStorageItem { key: 11, @@ -1020,7 +975,7 @@ mod tests { let item = fetch_item::( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer, i, ) @@ -1051,7 +1006,7 @@ mod tests { store_item::<_, _>( &mut tiny_flash, 0x00..0x40, - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer, &item, ) @@ -1063,7 +1018,7 @@ mod tests { store_item::<_, _>( &mut tiny_flash, 0x00..0x40, - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer, &MockStorageItem { key: UPPER_BOUND, @@ -1078,7 +1033,7 @@ mod tests { let item = fetch_item::( &mut tiny_flash, 0x00..0x40, - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer, i, ) @@ -1109,7 +1064,7 @@ mod tests { store_item::<_, _>( &mut big_flash, 0x0000..0x1000, - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer, &item, ) @@ -1121,7 +1076,7 
@@ mod tests { store_item::<_, _>( &mut big_flash, 0x0000..0x1000, - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer, &MockStorageItem { key: UPPER_BOUND, @@ -1136,7 +1091,7 @@ mod tests { let item = fetch_item::( &mut big_flash, 0x0000..0x1000, - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer, i, ) @@ -1169,7 +1124,7 @@ mod tests { store_item::<_, _>( &mut flash, 0x0000..0x4000, - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer, &item, ) @@ -1182,7 +1137,7 @@ mod tests { let item = fetch_item::( &mut flash, 0x0000..0x4000, - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer, i as u8, ) @@ -1217,7 +1172,7 @@ mod tests { store_item::<_, _>( &mut flash, FLASH_RANGE, - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer, &item, ) @@ -1232,7 +1187,7 @@ mod tests { assert!(fetch_item::( &mut flash, FLASH_RANGE, - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer, i ) @@ -1245,7 +1200,7 @@ mod tests { remove_item::( &mut flash, FLASH_RANGE, - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer, j, ) @@ -1257,7 +1212,7 @@ mod tests { assert!(fetch_item::( &mut flash, FLASH_RANGE, - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer, i ) @@ -1269,7 +1224,7 @@ mod tests { assert!(fetch_item::( &mut flash, FLASH_RANGE, - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer, j ) diff --git a/src/queue.rs b/src/queue.rs index c195537..8ca0466 100644 --- a/src/queue.rs +++ b/src/queue.rs @@ -30,26 +30,26 @@ //! let my_data = [10, 47, 29]; //! //! // We can push some data to the queue -//! push(&mut flash, flash_range.clone(), NoCache::new(), &my_data, false).await.unwrap(); +//! push(&mut flash, flash_range.clone(), &mut NoCache::new(), &my_data, false).await.unwrap(); //! //! // We can peek at the oldest data //! //! assert_eq!( -//! &peek(&mut flash, flash_range.clone(), NoCache::new(), &mut data_buffer).await.unwrap().unwrap()[..], +//! &peek(&mut flash, flash_range.clone(), &mut NoCache::new(), &mut data_buffer).await.unwrap().unwrap()[..], //! &my_data[..] //! ); //! //! // With popping we get back the oldest data, but that data is now also removed //! //! assert_eq!( -//! &pop(&mut flash, flash_range.clone(), NoCache::new(), &mut data_buffer).await.unwrap().unwrap()[..], +//! &pop(&mut flash, flash_range.clone(), &mut NoCache::new(), &mut data_buffer).await.unwrap().unwrap()[..], //! &my_data[..] //! ); //! //! // If we pop again, we find there's no data anymore //! //! assert_eq!( -//! pop(&mut flash, flash_range.clone(), NoCache::new(), &mut data_buffer).await, +//! pop(&mut flash, flash_range.clone(), &mut NoCache::new(), &mut data_buffer).await, //! Ok(None) //! ); //! # }); @@ -75,7 +75,7 @@ use embedded_storage_async::nor_flash::MultiwriteNorFlash; pub async fn push( flash: &mut S, flash_range: Range, - mut cache: impl CacheImpl, + cache: &mut impl CacheImpl, data: &[u8], allow_overwrite_old_data: bool, ) -> Result<(), Error> { @@ -83,19 +83,19 @@ pub async fn push( function = push_inner( flash, flash_range.clone(), - &mut cache, + cache, data, allow_overwrite_old_data ) .await, - repair = try_repair(flash, flash_range.clone(), &mut cache).await? + repair = try_repair(flash, flash_range.clone(), cache).await? 
) } async fn push_inner( flash: &mut S, flash_range: Range, - mut cache: impl CacheImpl, + cache: &mut impl CacheImpl, data: &[u8], allow_overwrite_old_data: bool, ) -> Result<(), Error> { @@ -118,21 +118,21 @@ async fn push_inner( return Err(BasicError::BufferTooBig.into()); } - let current_page = find_youngest_page(flash, flash_range.clone(), &mut cache).await?; + let current_page = find_youngest_page(flash, flash_range.clone(), cache).await?; let page_data_start_address = calculate_page_address::(flash_range.clone(), current_page) + S::WORD_SIZE as u32; let page_data_end_address = calculate_page_end_address::(flash_range.clone(), current_page) - S::WORD_SIZE as u32; - partial_close_page(flash, flash_range.clone(), &mut cache, current_page).await?; + partial_close_page(flash, flash_range.clone(), cache, current_page).await?; // Find the last item on the page so we know where we need to write let mut next_address = find_next_free_item_spot( flash, flash_range.clone(), - &mut cache, + cache, page_data_start_address, page_data_end_address, data.len() as u32, @@ -142,10 +142,10 @@ async fn push_inner( if next_address.is_none() { // No cap left on this page, move to the next page let next_page = next_page::(flash_range.clone(), current_page); - match get_page_state(flash, flash_range.clone(), &mut cache, next_page).await? { + match get_page_state(flash, flash_range.clone(), cache, next_page).await? { PageState::Open => { - close_page(flash, flash_range.clone(), &mut cache, current_page).await?; - partial_close_page(flash, flash_range.clone(), &mut cache, next_page).await?; + close_page(flash, flash_range.clone(), cache, current_page).await?; + partial_close_page(flash, flash_range.clone(), cache, next_page).await?; next_address = Some( calculate_page_address::(flash_range.clone(), next_page) + S::WORD_SIZE as u32, @@ -157,22 +157,16 @@ async fn push_inner( + S::WORD_SIZE as u32; if !allow_overwrite_old_data - && !is_page_empty( - flash, - flash_range.clone(), - &mut cache, - next_page, - Some(state), - ) - .await? + && !is_page_empty(flash, flash_range.clone(), cache, next_page, Some(state)) + .await? { cache.unmark_dirty(); return Err(BasicError::FullStorage.into()); } - open_page(flash, flash_range.clone(), &mut cache, next_page).await?; - close_page(flash, flash_range.clone(), &mut cache, current_page).await?; - partial_close_page(flash, flash_range.clone(), &mut cache, next_page).await?; + open_page(flash, flash_range.clone(), cache, next_page).await?; + close_page(flash, flash_range.clone(), cache, current_page).await?; + partial_close_page(flash, flash_range.clone(), cache, next_page).await?; next_address = Some(next_page_data_start_address); } PageState::PartialOpen => { @@ -188,7 +182,7 @@ async fn push_inner( Item::write_new( flash, flash_range.clone(), - &mut cache, + cache, next_address.unwrap(), data, ) @@ -203,11 +197,11 @@ async fn push_inner( /// If you also want to remove the data use [pop_many]. /// /// Returns an iterator-like type that can be used to peek into the data. 
-pub async fn peek_many( - flash: &mut S, +pub async fn peek_many<'d, S: NorFlash, CI: CacheImpl>( + flash: &'d mut S, flash_range: Range, - cache: CI, -) -> Result, Error> { + cache: &'d mut CI, +) -> Result, Error> { // Note: Corruption repair is done in these functions already Ok(PeekIterator { iter: QueueIterator::new(flash, flash_range, cache).await?, @@ -227,7 +221,7 @@ pub async fn peek_many( pub async fn peek<'d, S: NorFlash>( flash: &mut S, flash_range: Range, - cache: impl CacheImpl, + cache: &mut impl CacheImpl, data_buffer: &'d mut [u8], ) -> Result, Error> { // Note: Corruption repair is done in these functions already @@ -242,11 +236,11 @@ pub async fn peek<'d, S: NorFlash>( /// If you don't want to remove the data use [peek_many]. /// /// Returns an iterator-like type that can be used to pop the data. -pub async fn pop_many( - flash: &mut S, +pub async fn pop_many<'d, S: MultiwriteNorFlash, CI: CacheImpl>( + flash: &'d mut S, flash_range: Range, - cache: CI, -) -> Result, Error> { + cache: &'d mut CI, +) -> Result, Error> { // Note: Corruption repair is done in these functions already Ok(PopIterator { iter: QueueIterator::new(flash, flash_range, cache).await?, @@ -266,7 +260,7 @@ pub async fn pop_many( pub async fn pop<'d, S: MultiwriteNorFlash>( flash: &mut S, flash_range: Range, - cache: impl CacheImpl, + cache: &mut impl CacheImpl, data_buffer: &'d mut [u8], ) -> Result, Error> { pop_many(flash, flash_range, cache) @@ -299,7 +293,7 @@ impl<'d, S: MultiwriteNorFlash, CI: CacheImpl> PopIterator<'d, S, CI> { repair = try_repair( self.iter.flash, self.iter.flash_range.clone(), - &mut self.iter.cache + self.iter.cache ) .await? )?; @@ -382,7 +376,7 @@ impl<'d, S: NorFlash, CI: CacheImpl> PeekIterator<'d, S, CI> { struct QueueIterator<'d, S: NorFlash, CI: CacheImpl> { flash: &'d mut S, flash_range: Range, - cache: CI, + cache: &'d mut CI, current_address: CurrentAddress, } @@ -404,13 +398,13 @@ impl<'d, S: NorFlash, CI: CacheImpl> QueueIterator<'d, S, CI> { async fn new( flash: &'d mut S, flash_range: Range, - mut cache: CI, + cache: &'d mut CI, ) -> Result> { let start_address = run_with_auto_repair!( - function = Self::find_start_address(flash, flash_range.clone(), &mut cache) + function = Self::find_start_address(flash, flash_range.clone(), cache) .await .map_err(|e| e.into()), - repair = try_repair(flash, flash_range.clone(), &mut cache).await? + repair = try_repair(flash, flash_range.clone(), cache).await? )?; Ok(Self { @@ -424,7 +418,7 @@ impl<'d, S: NorFlash, CI: CacheImpl> QueueIterator<'d, S, CI> { async fn find_start_address( flash: &mut S, flash_range: Range, - mut cache: &mut CI, + cache: &mut CI, ) -> Result> { assert_eq!(flash_range.start % S::ERASE_SIZE as u32, 0); assert_eq!(flash_range.end % S::ERASE_SIZE as u32, 0); @@ -436,7 +430,7 @@ impl<'d, S: NorFlash, CI: CacheImpl> QueueIterator<'d, S, CI> { cache.invalidate_cache_state(); } - let oldest_page = find_oldest_page(flash, flash_range.clone(), &mut cache).await?; + let oldest_page = find_oldest_page(flash, flash_range.clone(), cache).await?; // We start at the start of the oldest page let current_address = match cache.first_item_after_erased(oldest_page) { @@ -455,7 +449,7 @@ impl<'d, S: NorFlash, CI: CacheImpl> QueueIterator<'d, S, CI> { ) -> Result, u32)>, Error> { let value = run_with_auto_repair!( function = self.next_inner(data_buffer).await.map_err(|e| e.into()), - repair = try_repair(self.flash, self.flash_range.clone(), &mut self.cache).await? 
+ repair = try_repair(self.flash, self.flash_range.clone(), self.cache).await? ); value.map(|v| v.map(|(item, address)| (item.reborrow(data_buffer), address))) @@ -579,18 +573,18 @@ struct QueueIteratorResetPoint(CurrentAddress); pub async fn find_max_fit( flash: &mut S, flash_range: Range, - mut cache: impl CacheImpl, + cache: &mut impl CacheImpl, ) -> Result, Error> { run_with_auto_repair!( - function = find_max_fit_inner(flash, flash_range.clone(), &mut cache).await, - repair = try_repair(flash, flash_range.clone(), &mut cache).await? + function = find_max_fit_inner(flash, flash_range.clone(), cache).await, + repair = try_repair(flash, flash_range.clone(), cache).await? ) } async fn find_max_fit_inner( flash: &mut S, flash_range: Range, - mut cache: impl CacheImpl, + cache: &mut impl CacheImpl, ) -> Result, Error> { assert_eq!(flash_range.start % S::ERASE_SIZE as u32, 0); assert_eq!(flash_range.end % S::ERASE_SIZE as u32, 0); @@ -602,21 +596,13 @@ async fn find_max_fit_inner( cache.invalidate_cache_state(); } - let current_page = find_youngest_page(flash, flash_range.clone(), &mut cache).await?; + let current_page = find_youngest_page(flash, flash_range.clone(), cache).await?; // Check if we have space on the next page let next_page = next_page::(flash_range.clone(), current_page); - match get_page_state(flash, flash_range.clone(), &mut cache, next_page).await? { + match get_page_state(flash, flash_range.clone(), cache, next_page).await? { state @ PageState::Closed => { - if is_page_empty( - flash, - flash_range.clone(), - &mut cache, - next_page, - Some(state), - ) - .await? - { + if is_page_empty(flash, flash_range.clone(), cache, next_page, Some(state)).await? { cache.unmark_dirty(); return Ok(Some((S::ERASE_SIZE - (2 * S::WORD_SIZE)) as u32)); } @@ -714,12 +700,11 @@ async fn find_oldest_page( async fn try_repair( flash: &mut S, flash_range: Range, - mut cache: impl CacheImpl, + cache: &mut impl CacheImpl, ) -> Result<(), BasicError> { cache.invalidate_cache_state(); - drop(cache); - crate::try_general_repair(flash, flash_range.clone()).await?; + crate::try_general_repair(flash, flash_range.clone(), cache).await?; Ok(()) } @@ -744,7 +729,7 @@ mod tests { peek( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer ) .await @@ -756,7 +741,7 @@ mod tests { push( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &data_buffer[..DATA_SIZE], false, ) @@ -766,7 +751,7 @@ mod tests { peek( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer ) .await @@ -778,7 +763,7 @@ mod tests { push( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &data_buffer[..DATA_SIZE], false, ) @@ -788,7 +773,7 @@ mod tests { peek( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer ) .await @@ -802,7 +787,7 @@ mod tests { push( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &data_buffer[..DATA_SIZE], false, ) @@ -813,7 +798,7 @@ mod tests { push( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &data_buffer[..DATA_SIZE], true, ) @@ -824,7 +809,7 @@ mod tests { peek( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer ) .await @@ -836,7 +821,7 @@ mod tests { pop( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut 
data_buffer ) .await @@ -849,7 +834,7 @@ mod tests { peek( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer ) .await @@ -861,7 +846,7 @@ mod tests { pop( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer ) .await @@ -874,7 +859,7 @@ mod tests { peek( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer ) .await @@ -885,7 +870,7 @@ mod tests { pop( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer ) .await @@ -907,7 +892,7 @@ mod tests { push( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &data, true, ) @@ -917,7 +902,7 @@ mod tests { peek( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer ) .await @@ -930,7 +915,7 @@ mod tests { pop( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer ) .await @@ -943,7 +928,7 @@ mod tests { peek( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer ) .await @@ -968,7 +953,7 @@ mod tests { push( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &data, true, ) @@ -978,7 +963,7 @@ mod tests { peek( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer ) .await @@ -992,7 +977,7 @@ mod tests { pop( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer ) .await @@ -1006,7 +991,7 @@ mod tests { peek( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer ) .await @@ -1179,7 +1164,7 @@ mod tests { push( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &data, false, ) @@ -1196,7 +1181,7 @@ mod tests { pop( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer ) .await @@ -1215,7 +1200,7 @@ mod tests { push( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &data, false, ) @@ -1232,7 +1217,7 @@ mod tests { pop( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer ) .await @@ -1279,7 +1264,7 @@ mod tests { push( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &data_buffer[0..20], false, ) @@ -1289,7 +1274,7 @@ mod tests { push( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &data_buffer[0..20], false, ) @@ -1302,7 +1287,7 @@ mod tests { pop( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer ) .await @@ -1315,7 +1300,7 @@ mod tests { pop( &mut flash, flash_range.clone(), - cache::NoCache::new(), + &mut cache::NoCache::new(), &mut data_buffer ) .await @@ -1331,24 +1316,24 @@ mod tests { const FLASH_RANGE: Range = 0x000..0x1000; - close_page(&mut flash, FLASH_RANGE, &mut cache::NoCache::new(), 0) + close_page(&mut flash, FLASH_RANGE, &mut &mut cache::NoCache::new(), 0) .await .unwrap(); - close_page(&mut flash, FLASH_RANGE, &mut cache::NoCache::new(), 1) + close_page(&mut flash, FLASH_RANGE, &mut &mut cache::NoCache::new(), 1) .await .unwrap(); - partial_close_page(&mut flash, FLASH_RANGE, &mut cache::NoCache::new(), 2) + partial_close_page(&mut flash, 
FLASH_RANGE, &mut &mut cache::NoCache::new(), 2) .await .unwrap(); assert_eq!( - find_youngest_page(&mut flash, FLASH_RANGE, &mut cache::NoCache::new()) + find_youngest_page(&mut flash, FLASH_RANGE, &mut &mut cache::NoCache::new()) .await .unwrap(), 2 ); assert_eq!( - find_oldest_page(&mut flash, FLASH_RANGE, &mut cache::NoCache::new()) + find_oldest_page(&mut flash, FLASH_RANGE, &mut &mut cache::NoCache::new()) .await .unwrap(), 0 From b0ec015ccdd37165e846bff56a88f1353b6edbd9 Mon Sep 17 00:00:00 2001 From: Dion Dokter Date: Mon, 18 Mar 2024 16:59:56 +0100 Subject: [PATCH 02/10] Start to tackle StorageItem trait --- src/lib.rs | 11 +++- src/map.rs | 171 +++++++++++++++++++++++++++-------------------------- 2 files changed, 98 insertions(+), 84 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 1746ce2..c65ebba 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,5 +1,5 @@ #![cfg_attr(not(any(test, doctest, feature = "std")), no_std)] -#![deny(missing_docs)] +// #![deny(missing_docs)] #![doc = include_str!("../README.md")] // Assumptions made in this crate: @@ -8,6 +8,7 @@ // - flash write size is quite small, so it writes words and not full pages use cache::PrivateCacheImpl; +use map::KeyError; use core::{ fmt::Debug, ops::{Deref, DerefMut, Range}, @@ -389,6 +390,13 @@ pub enum Error { BufferTooSmall(usize), /// A storage item error Item(I), + KeyError(KeyError), +} + +impl From for Error { + fn from(v: KeyError) -> Self { + Self::KeyError(v) + } } impl PartialEq for Error { @@ -446,6 +454,7 @@ where "A provided buffer was to small to be used. Needed was {needed}" ), Error::Item(value) => write!(f, "Item error: {value}"), + Error::KeyError(value) => write!(f, "Key error: {value:?}"), } } } diff --git a/src/map.rs b/src/map.rs index b11c2c6..5379748 100644 --- a/src/map.rs +++ b/src/map.rs @@ -115,6 +115,8 @@ //! # }); //! ``` +use core::convert::Infallible; + use embedded_storage_async::nor_flash::MultiwriteNorFlash; use crate::item::{find_next_free_item_spot, Item, ItemHeader, ItemIter}; @@ -165,13 +167,13 @@ pub async fn fetch_item( /// Fetch the item, but with the address and header #[allow(clippy::type_complexity)] -async fn fetch_item_with_location( +async fn fetch_item_with_location<'d, K: Key, S: NorFlash>( flash: &mut S, flash_range: Range, - cache: &mut impl PrivateKeyCacheImpl, - data_buffer: &mut [u8], - search_key: I::Key, -) -> Result, Error> { + cache: &mut impl PrivateKeyCacheImpl, + data_buffer: &'d mut [u8], + search_key: K, +) -> Result, u32)>, Error> { assert_eq!(flash_range.start % S::ERASE_SIZE as u32, 0); assert_eq!(flash_range.end % S::ERASE_SIZE as u32, 0); assert!(flash_range.end - flash_range.start >= S::ERASE_SIZE as u32 * 2); @@ -212,11 +214,7 @@ async fn fetch_item_with_location( break 'cache; } item::MaybeItem::Present(item) => { - return Ok(Some(( - I::deserialize_from(item.data()).map_err(Error::Item)?, - cached_location, - item.header, - ))); + return Ok(Some((item, cached_location))); } } } @@ -259,7 +257,7 @@ async fn fetch_item_with_location( // If we don't find it in the current page, then we check again in the previous page if that page is closed. let mut current_page_to_check = last_used_page.unwrap(); - let mut newest_found_item = None; + let mut newest_found_item_address = None; loop { let page_data_start_address = @@ -271,18 +269,14 @@ async fn fetch_item_with_location( let mut it = ItemIter::new(page_data_start_address, page_data_end_address); while let Some((item, address)) = it.next(flash, data_buffer).await? 
{ - if I::deserialize_key_only(item.data()).map_err(Error::Item)? == search_key { - newest_found_item = Some(( - I::deserialize_from(item.data()).map_err(Error::Item)?, - address, - item.header, - )); + if K::deserialize_from(item.data())?.0 == search_key { + newest_found_item_address = Some(address); } } // We've found the item! We can stop searching - if let Some(newest_found_item) = newest_found_item.as_ref() { - cache.notice_key_location(search_key, newest_found_item.1, false); + if let Some(newest_found_item_address) = newest_found_item_address.as_ref() { + cache.notice_key_location(search_key, *newest_found_item_address, false); break; } @@ -302,7 +296,26 @@ async fn fetch_item_with_location( } cache.unmark_dirty(); - Ok(newest_found_item) + + // We now need to reread the item because we lost all its data other than its address + + if let Some(newest_found_item_address) = newest_found_item_address { + let item = ItemHeader::read_new(flash, newest_found_item_address, u32::MAX) + .await? + .ok_or_else(|| { + // How come there's no item header here!? We just found it! + Error::Corrupted { + #[cfg(feature = "_test")] + backtrace: std::backtrace::Backtrace::capture(), + } + })? + .read_item(flash, data_buffer, newest_found_item_address, u32::MAX) + .await?; + + Ok(Some((item.unwrap()?, newest_found_item_address))) + } else { + Ok(None) + } } /// Store an item into flash memory. @@ -313,24 +326,34 @@ async fn fetch_item_with_location( /// /// *Note: On a given flash range, make sure to use only the same type as [StorageItem] every time /// or types that serialize and deserialize the key in the same way.* -pub async fn store_item( +pub async fn store_item( flash: &mut S, flash_range: Range, cache: &mut impl KeyCacheImpl, data_buffer: &mut [u8], + key: K, item: &I, ) -> Result<(), Error> { run_with_auto_repair!( - function = store_item_inner(flash, flash_range.clone(), cache, data_buffer, item).await, + function = store_item_inner( + flash, + flash_range.clone(), + cache, + data_buffer, + key.clone(), + item + ) + .await, repair = try_repair::(flash, flash_range.clone(), cache, data_buffer).await? ) } -async fn store_item_inner( +async fn store_item_inner( flash: &mut S, flash_range: Range, cache: &mut impl KeyCacheImpl, data_buffer: &mut [u8], + key: K, item: &I, ) -> Result<(), Error> { assert_eq!(flash_range.start % S::ERASE_SIZE as u32, 0); @@ -386,6 +409,7 @@ async fn store_item_inner( calculate_page_end_address::(flash_range.clone(), partial_open_page) - S::WORD_SIZE as u32; + todo!("Serialize key into the buffer here too!"); let item_data_length = item.serialize_into(data_buffer).map_err(Error::Item)?; let free_spot_address = find_next_free_item_spot( @@ -451,7 +475,7 @@ async fn store_item_inner( get_page_state(flash, flash_range.clone(), cache, next_buffer_page).await?; if !next_buffer_page_state.is_open() { - migrate_items::( + migrate_items::( flash, flash_range.clone(), cache, @@ -498,15 +522,15 @@ async fn store_item_inner( /// All items in flash have to be read and deserialized to find the items with the key. /// This is unlikely to be cached well. 
/// -pub async fn remove_item( +pub async fn remove_item( flash: &mut S, flash_range: Range, - cache: &mut impl KeyCacheImpl, + cache: &mut impl KeyCacheImpl, data_buffer: &mut [u8], - search_key: I::Key, -) -> Result<(), Error> { + search_key: K, +) -> Result<(), Error> { run_with_auto_repair!( - function = remove_item_inner::( + function = remove_item_inner::( flash, flash_range.clone(), cache, @@ -514,17 +538,17 @@ pub async fn remove_item( search_key.clone() ) .await, - repair = try_repair::(flash, flash_range.clone(), cache, data_buffer).await? + repair = try_repair::(flash, flash_range.clone(), cache, data_buffer).await? ) } -async fn remove_item_inner( +async fn remove_item_inner( flash: &mut S, flash_range: Range, - cache: &mut impl KeyCacheImpl, + cache: &mut impl KeyCacheImpl, data_buffer: &mut [u8], - search_key: I::Key, -) -> Result<(), Error> { + search_key: K, +) -> Result<(), Error> { cache.notice_key_erased(&search_key); // Search for the last used page. We're gonna erase from the one after this first. @@ -565,7 +589,7 @@ async fn remove_item_inner( item::MaybeItem::Corrupted(_, _) => continue, item::MaybeItem::Erased(_, _) => continue, item::MaybeItem::Present(item) => { - let item_key = I::deserialize_key_only(item.data()).map_err(Error::Item)?; + let (item_key, _) = K::deserialize_from(item.data())?; // If this item has the same key as the key we're trying to erase, then erase the item. // But keep going! We need to erase everything. @@ -585,51 +609,35 @@ async fn remove_item_inner( Ok(()) } -/// A way of serializing and deserializing items in the storage. -/// -/// Serialized items may not be longer than [u16::MAX]. -/// Items must also fit within a page (together with the bits of overhead added in the storage process). -pub trait StorageItem { - /// The key type of the key-value pair - type Key: Eq + Clone; +pub trait Key: Eq + Clone + Sized { + fn serialize_into(&self, buffer: &mut [u8]) -> Result; + fn deserialize_from(buffer: &[u8]) -> Result<(Self, usize), KeyError>; +} + +pub trait MapItem: Sized { /// The error type for serialization and deserialization type Error; - /// Serialize the key-value item into the given buffer. - /// Returns the number of bytes the buffer was filled with or an error. - /// - /// The serialized data does not have to self-describe its length or do framing. - /// This crate already stores the length of any item in flash as a u16. fn serialize_into(&self, buffer: &mut [u8]) -> Result; + fn deserialize_from(buffer: &[u8]) -> Result; +} - /// Deserialize the key-value item from the given buffer. This buffer should be as long - /// as what was serialized before. - fn deserialize_from(buffer: &[u8]) -> Result - where - Self: Sized; - - /// Optimization for deserializing the key only. Can give a small performance boost if - /// your key is easily extractable from the buffer. - fn deserialize_key_only(buffer: &[u8]) -> Result - where - Self: Sized, - { - // This works for any impl, but could be overridden by the user - Ok(Self::deserialize_from(buffer)?.key()) - } - - /// The key of the key-value item. It is used by the storage to know what the key of this item is. 
- fn key(&self) -> Self::Key; +#[non_exhaustive] +#[derive(Debug)] +#[cfg_attr(feature = "defmt-03", derive(defmt::Format))] +pub enum KeyError { + BufferTooSmall, + InvalidFormat, } -async fn migrate_items( +async fn migrate_items( flash: &mut S, flash_range: Range, - cache: &mut impl PrivateKeyCacheImpl, + cache: &mut impl PrivateKeyCacheImpl, data_buffer: &mut [u8], source_page: usize, target_page: usize, -) -> Result<(), Error> { +) -> Result<(), Error> { // We need to move the data from the next buffer page to the next_page_to_use, but only if that data // doesn't have a newer value somewhere else. @@ -641,11 +649,11 @@ async fn migrate_items( calculate_page_end_address::(flash_range.clone(), source_page) - S::WORD_SIZE as u32, ); while let Some((item, item_address)) = it.next(flash, data_buffer).await? { - let key = I::deserialize_key_only(item.data()).map_err(Error::Item)?; - let (item_header, data_buffer) = item.destruct(); + let (key, _) = K::deserialize_from(item.data())?; + let (_, data_buffer) = item.destruct(); // Search for the newest item with the key we found - let Some((_, found_address, _)) = fetch_item_with_location::( + let Some((found_item, found_address)) = fetch_item_with_location::( flash, flash_range.clone(), cache, @@ -662,16 +670,13 @@ async fn migrate_items( }; if found_address == item_address { - // The newest item with this key is the item we're about to erase - // This means we need to copy it over to the next_page_to_use - let item = item_header - .read_item(flash, data_buffer, item_address, u32::MAX) - .await? - .unwrap()?; cache.notice_key_location(key, next_page_write_address, true); - item.write(flash, flash_range.clone(), cache, next_page_write_address) + found_item + .write(flash, flash_range.clone(), cache, next_page_write_address) .await?; - next_page_write_address = item.header.next_item_address::(next_page_write_address); + next_page_write_address = found_item + .header + .next_item_address::(next_page_write_address); } } @@ -690,12 +695,12 @@ async fn migrate_items( /// If this function or the function call after this crate returns [Error::Corrupted], then it's unlikely /// that the state can be recovered. To at least make everything function again at the cost of losing the data, /// erase the flash range. 
-async fn try_repair( +async fn try_repair( flash: &mut S, flash_range: Range, - cache: &mut impl KeyCacheImpl, + cache: &mut impl KeyCacheImpl, data_buffer: &mut [u8], -) -> Result<(), Error> { +) -> Result<(), Error> { cache.invalidate_cache_state(); crate::try_general_repair(flash, flash_range.clone(), cache).await?; @@ -716,7 +721,7 @@ async fn try_repair( // Then partially close it again partial_close_page(flash, flash_range.clone(), cache, partial_open_page).await?; - migrate_items::( + migrate_items::( flash, flash_range.clone(), cache, From 9bfdde368879fb152fd792c1e4e8be3fac21149a Mon Sep 17 00:00:00 2001 From: Dion Dokter Date: Mon, 25 Mar 2024 16:58:04 +0100 Subject: [PATCH 03/10] Refactor storage item to have separate key and item --- src/cache/tests.rs | 608 ++++++++++++++++++++++----------------------- src/item.rs | 50 ++-- src/lib.rs | 100 ++------ src/map.rs | 507 ++++++++++++++++++------------------- src/queue.rs | 52 ++-- 5 files changed, 617 insertions(+), 700 deletions(-) diff --git a/src/cache/tests.rs b/src/cache/tests.rs index cc46a79..a2be55f 100644 --- a/src/cache/tests.rs +++ b/src/cache/tests.rs @@ -1,304 +1,304 @@ -#[cfg(test)] -mod queue_tests { - use core::ops::Range; - - use crate::{ - cache::{CacheImpl, NoCache, PagePointerCache, PageStateCache}, - mock_flash::{self, FlashStatsResult, WriteCountCheck}, - queue::{peek, pop, push}, - AlignedBuf, - }; - - use futures_test::test; - - const NUM_PAGES: usize = 4; - const LOOP_COUNT: usize = 2000; - - #[test] - async fn no_cache() { - assert_eq!( - run_test(&mut NoCache::new()).await, - FlashStatsResult { - erases: 146, - reads: 594934, - writes: 6299, - bytes_read: 2766058, - bytes_written: 53299 - } - ); - } - - #[test] - async fn page_state_cache() { - assert_eq!( - run_test(&mut PageStateCache::::new()).await, - FlashStatsResult { - erases: 146, - reads: 308740, - writes: 6299, - bytes_read: 2479864, - bytes_written: 53299 - } - ); - } - - #[test] - async fn page_pointer_cache() { - assert_eq!( - run_test(&mut PagePointerCache::::new()).await, - FlashStatsResult { - erases: 146, - reads: 211172, - writes: 6299, - bytes_read: 1699320, - bytes_written: 53299 - } - ); - } - - async fn run_test(cache: &mut impl CacheImpl) -> FlashStatsResult { - let mut flash = - mock_flash::MockFlashBase::::new(WriteCountCheck::Twice, None, true); - const FLASH_RANGE: Range = 0x00..0x400; - let mut data_buffer = AlignedBuf([0; 1024]); - - let start_snapshot = flash.stats_snapshot(); - - for i in 0..LOOP_COUNT { - println!("{i}"); - let data = vec![i as u8; i % 20 + 1]; - - println!("PUSH"); - push(&mut flash, FLASH_RANGE, cache, &data, true) - .await - .unwrap(); - assert_eq!( - peek(&mut flash, FLASH_RANGE, cache, &mut data_buffer) - .await - .unwrap() - .unwrap(), - &data, - "At {i}" - ); - println!("POP"); - assert_eq!( - pop(&mut flash, FLASH_RANGE, cache, &mut data_buffer) - .await - .unwrap() - .unwrap(), - &data, - "At {i}" - ); - println!("PEEK"); - assert_eq!( - peek(&mut flash, FLASH_RANGE, cache, &mut data_buffer) - .await - .unwrap(), - None, - "At {i}" - ); - println!("DONE"); - } - - start_snapshot.compare_to(flash.stats_snapshot()) - } -} - -#[cfg(test)] -mod map_tests { - use core::ops::Range; - - use crate::{ - cache::{KeyCacheImpl, KeyPointerCache, NoCache, PagePointerCache, PageStateCache}, - map::{fetch_item, store_item, StorageItem}, - mock_flash::{self, FlashStatsResult, WriteCountCheck}, - AlignedBuf, - }; - - use futures_test::test; - - const NUM_PAGES: usize = 4; - - #[test] - async fn no_cache() { - 
assert_eq!( - run_test(&mut NoCache::new()).await, - FlashStatsResult { - erases: 198, - reads: 224161, - writes: 5201, - bytes_read: 1770974, - bytes_written: 50401 - } - ); - } - - #[test] - async fn page_state_cache() { - assert_eq!( - run_test(&mut PageStateCache::::new()).await, - FlashStatsResult { - erases: 198, - reads: 171543, - writes: 5201, - bytes_read: 1718356, - bytes_written: 50401 - } - ); - } - - #[test] - async fn page_pointer_cache() { - assert_eq!( - run_test(&mut PagePointerCache::::new()).await, - FlashStatsResult { - erases: 198, - reads: 153667, - writes: 5201, - bytes_read: 1575348, - bytes_written: 50401 - } - ); - } - - #[test] - async fn key_pointer_cache_half() { - assert_eq!( - run_test(&mut KeyPointerCache::::new()).await, - FlashStatsResult { - erases: 198, - reads: 130523, - writes: 5201, - bytes_read: 1318479, - bytes_written: 50401 - } - ); - } - - #[test] - async fn key_pointer_cache_full() { - assert_eq!( - run_test(&mut KeyPointerCache::::new()).await, - FlashStatsResult { - erases: 198, - reads: 14506, - writes: 5201, - bytes_read: 150566, - bytes_written: 50401 - } - ); - } - - #[derive(Debug, PartialEq, Eq)] - struct MockStorageItem { - key: u8, - value: Vec, - } - - #[derive(Debug, PartialEq, Eq)] - enum MockStorageItemError { - BufferTooSmall, - InvalidKey, - BufferTooBig, - } - - impl StorageItem for MockStorageItem { - type Key = u8; - - type Error = MockStorageItemError; - - fn serialize_into(&self, buffer: &mut [u8]) -> Result { - if buffer.len() < 2 + self.value.len() { - return Err(MockStorageItemError::BufferTooSmall); - } - - if self.value.len() > 255 { - return Err(MockStorageItemError::BufferTooBig); - } - - // The serialized value must not be all 0xFF - if self.key == 0xFF { - return Err(MockStorageItemError::InvalidKey); - } - - buffer[0] = self.key; - buffer[1] = self.value.len() as u8; - buffer[2..][..self.value.len()].copy_from_slice(&self.value); - - Ok(2 + self.value.len()) - } - - fn deserialize_from(buffer: &[u8]) -> Result - where - Self: Sized, - { - if buffer.len() < 2 { - return Err(MockStorageItemError::BufferTooSmall); - } - - if buffer[0] == 0xFF { - return Err(MockStorageItemError::InvalidKey); - } - - let len = buffer[1]; - - if buffer.len() < 2 + len as usize { - return Err(MockStorageItemError::BufferTooSmall); - } - - Ok(Self { - key: buffer[0], - value: buffer[2..][..len as usize].to_vec(), - }) - } - - fn key(&self) -> Self::Key { - self.key - } - } - - async fn run_test(cache: &mut impl KeyCacheImpl) -> FlashStatsResult { - let mut flash = - mock_flash::MockFlashBase::::new(WriteCountCheck::Twice, None, true); - const FLASH_RANGE: Range = 0x00..0x400; - let mut data_buffer = AlignedBuf([0; 128]); - - const LENGHT_PER_KEY: [usize; 24] = [ - 11, 13, 6, 13, 13, 10, 2, 3, 5, 36, 1, 65, 4, 6, 1, 15, 10, 7, 3, 15, 9, 3, 4, 5, - ]; - - let start_snapshot = flash.stats_snapshot(); - - for _ in 0..100 { - for i in 0..24 { - let item = MockStorageItem { - key: i as u8, - value: vec![i as u8; LENGHT_PER_KEY[i]], - }; - - store_item::<_, _>(&mut flash, FLASH_RANGE, cache, &mut data_buffer, &item) - .await - .unwrap(); - } - - for i in 0..24 { - let item = fetch_item::( - &mut flash, - FLASH_RANGE, - cache, - &mut data_buffer, - i as u8, - ) - .await - .unwrap() - .unwrap(); - - println!("Fetched {item:?}"); - - assert_eq!(item.value, vec![i as u8; LENGHT_PER_KEY[i]]); - } - } - - start_snapshot.compare_to(flash.stats_snapshot()) - } -} +// #[cfg(test)] +// mod queue_tests { +// use core::ops::Range; + +// use crate::{ +// 
cache::{CacheImpl, NoCache, PagePointerCache, PageStateCache}, +// mock_flash::{self, FlashStatsResult, WriteCountCheck}, +// queue::{peek, pop, push}, +// AlignedBuf, +// }; + +// use futures_test::test; + +// const NUM_PAGES: usize = 4; +// const LOOP_COUNT: usize = 2000; + +// #[test] +// async fn no_cache() { +// assert_eq!( +// run_test(&mut NoCache::new()).await, +// FlashStatsResult { +// erases: 146, +// reads: 594934, +// writes: 6299, +// bytes_read: 2766058, +// bytes_written: 53299 +// } +// ); +// } + +// #[test] +// async fn page_state_cache() { +// assert_eq!( +// run_test(&mut PageStateCache::::new()).await, +// FlashStatsResult { +// erases: 146, +// reads: 308740, +// writes: 6299, +// bytes_read: 2479864, +// bytes_written: 53299 +// } +// ); +// } + +// #[test] +// async fn page_pointer_cache() { +// assert_eq!( +// run_test(&mut PagePointerCache::::new()).await, +// FlashStatsResult { +// erases: 146, +// reads: 211172, +// writes: 6299, +// bytes_read: 1699320, +// bytes_written: 53299 +// } +// ); +// } + +// async fn run_test(cache: &mut impl CacheImpl) -> FlashStatsResult { +// let mut flash = +// mock_flash::MockFlashBase::::new(WriteCountCheck::Twice, None, true); +// const FLASH_RANGE: Range = 0x00..0x400; +// let mut data_buffer = AlignedBuf([0; 1024]); + +// let start_snapshot = flash.stats_snapshot(); + +// for i in 0..LOOP_COUNT { +// println!("{i}"); +// let data = vec![i as u8; i % 20 + 1]; + +// println!("PUSH"); +// push(&mut flash, FLASH_RANGE, cache, &data, true) +// .await +// .unwrap(); +// assert_eq!( +// peek(&mut flash, FLASH_RANGE, cache, &mut data_buffer) +// .await +// .unwrap() +// .unwrap(), +// &data, +// "At {i}" +// ); +// println!("POP"); +// assert_eq!( +// pop(&mut flash, FLASH_RANGE, cache, &mut data_buffer) +// .await +// .unwrap() +// .unwrap(), +// &data, +// "At {i}" +// ); +// println!("PEEK"); +// assert_eq!( +// peek(&mut flash, FLASH_RANGE, cache, &mut data_buffer) +// .await +// .unwrap(), +// None, +// "At {i}" +// ); +// println!("DONE"); +// } + +// start_snapshot.compare_to(flash.stats_snapshot()) +// } +// } + +// #[cfg(test)] +// mod map_tests { +// use core::ops::Range; + +// use crate::{ +// cache::{KeyCacheImpl, KeyPointerCache, NoCache, PagePointerCache, PageStateCache}, +// map::{fetch_item, store_item}, +// mock_flash::{self, FlashStatsResult, WriteCountCheck}, +// AlignedBuf, +// }; + +// use futures_test::test; + +// const NUM_PAGES: usize = 4; + +// #[test] +// async fn no_cache() { +// assert_eq!( +// run_test(&mut NoCache::new()).await, +// FlashStatsResult { +// erases: 198, +// reads: 224161, +// writes: 5201, +// bytes_read: 1770974, +// bytes_written: 50401 +// } +// ); +// } + +// #[test] +// async fn page_state_cache() { +// assert_eq!( +// run_test(&mut PageStateCache::::new()).await, +// FlashStatsResult { +// erases: 198, +// reads: 171543, +// writes: 5201, +// bytes_read: 1718356, +// bytes_written: 50401 +// } +// ); +// } + +// #[test] +// async fn page_pointer_cache() { +// assert_eq!( +// run_test(&mut PagePointerCache::::new()).await, +// FlashStatsResult { +// erases: 198, +// reads: 153667, +// writes: 5201, +// bytes_read: 1575348, +// bytes_written: 50401 +// } +// ); +// } + +// #[test] +// async fn key_pointer_cache_half() { +// assert_eq!( +// run_test(&mut KeyPointerCache::::new()).await, +// FlashStatsResult { +// erases: 198, +// reads: 130523, +// writes: 5201, +// bytes_read: 1318479, +// bytes_written: 50401 +// } +// ); +// } + +// #[test] +// async fn key_pointer_cache_full() { 
+// assert_eq!( +// run_test(&mut KeyPointerCache::::new()).await, +// FlashStatsResult { +// erases: 198, +// reads: 14506, +// writes: 5201, +// bytes_read: 150566, +// bytes_written: 50401 +// } +// ); +// } + +// #[derive(Debug, PartialEq, Eq)] +// struct MockStorageItem { +// key: u8, +// value: Vec, +// } + +// #[derive(Debug, PartialEq, Eq)] +// enum MockStorageItemError { +// BufferTooSmall, +// InvalidKey, +// BufferTooBig, +// } + +// impl StorageItem for MockStorageItem { +// type Key = u8; + +// type Error = MockStorageItemError; + +// fn serialize_into(&self, buffer: &mut [u8]) -> Result { +// if buffer.len() < 2 + self.value.len() { +// return Err(MockStorageItemError::BufferTooSmall); +// } + +// if self.value.len() > 255 { +// return Err(MockStorageItemError::BufferTooBig); +// } + +// // The serialized value must not be all 0xFF +// if self.key == 0xFF { +// return Err(MockStorageItemError::InvalidKey); +// } + +// buffer[0] = self.key; +// buffer[1] = self.value.len() as u8; +// buffer[2..][..self.value.len()].copy_from_slice(&self.value); + +// Ok(2 + self.value.len()) +// } + +// fn deserialize_from(buffer: &[u8]) -> Result +// where +// Self: Sized, +// { +// if buffer.len() < 2 { +// return Err(MockStorageItemError::BufferTooSmall); +// } + +// if buffer[0] == 0xFF { +// return Err(MockStorageItemError::InvalidKey); +// } + +// let len = buffer[1]; + +// if buffer.len() < 2 + len as usize { +// return Err(MockStorageItemError::BufferTooSmall); +// } + +// Ok(Self { +// key: buffer[0], +// value: buffer[2..][..len as usize].to_vec(), +// }) +// } + +// fn key(&self) -> Self::Key { +// self.key +// } +// } + +// async fn run_test(cache: &mut impl KeyCacheImpl) -> FlashStatsResult { +// let mut flash = +// mock_flash::MockFlashBase::::new(WriteCountCheck::Twice, None, true); +// const FLASH_RANGE: Range = 0x00..0x400; +// let mut data_buffer = AlignedBuf([0; 128]); + +// const LENGHT_PER_KEY: [usize; 24] = [ +// 11, 13, 6, 13, 13, 10, 2, 3, 5, 36, 1, 65, 4, 6, 1, 15, 10, 7, 3, 15, 9, 3, 4, 5, +// ]; + +// let start_snapshot = flash.stats_snapshot(); + +// for _ in 0..100 { +// for i in 0..24 { +// let item = MockStorageItem { +// key: i as u8, +// value: vec![i as u8; LENGHT_PER_KEY[i]], +// }; + +// store_item::<_, _>(&mut flash, FLASH_RANGE, cache, &mut data_buffer, &item) +// .await +// .unwrap(); +// } + +// for i in 0..24 { +// let item = fetch_item::( +// &mut flash, +// FLASH_RANGE, +// cache, +// &mut data_buffer, +// i as u8, +// ) +// .await +// .unwrap() +// .unwrap(); + +// println!("Fetched {item:?}"); + +// assert_eq!(item.value, vec![i as u8; LENGHT_PER_KEY[i]]); +// } +// } + +// start_snapshot.compare_to(flash.stats_snapshot()) +// } +// } diff --git a/src/item.rs b/src/item.rs index fc813b7..068f1f0 100644 --- a/src/item.rs +++ b/src/item.rs @@ -21,16 +21,16 @@ //! and has some other modifications to make corruption less likely to happen. //! 
-use core::num::NonZeroU32; use core::ops::Range; +use core::num::NonZeroU32; use embedded_storage_async::nor_flash::{MultiwriteNorFlash, NorFlash}; use crate::{ cache::PrivateCacheImpl, calculate_page_address, calculate_page_end_address, calculate_page_index, get_page_state, round_down_to_alignment, round_down_to_alignment_usize, - round_up_to_alignment, round_up_to_alignment_usize, AlignedBuf, BasicError, NorFlashExt, - PageState, MAX_WORD_SIZE, + round_up_to_alignment, round_up_to_alignment_usize, AlignedBuf, Error, NorFlashExt, PageState, + MAX_WORD_SIZE, }; #[derive(Debug)] @@ -54,7 +54,7 @@ impl ItemHeader { flash: &mut S, address: u32, end_address: u32, - ) -> Result, BasicError> { + ) -> Result, Error< S::Error>> { let mut buffer = [0; MAX_WORD_SIZE]; let header_slice = &mut buffer[..round_up_to_alignment_usize::(Self::LENGTH)]; @@ -65,7 +65,7 @@ impl ItemHeader { flash .read(address, header_slice) .await - .map_err(|e| BasicError::Storage { + .map_err(|e| Error::Storage { value: e, #[cfg(feature = "_test")] backtrace: std::backtrace::Backtrace::capture(), @@ -81,7 +81,7 @@ impl ItemHeader { let calculated_length_crc = crc16(&header_slice[Self::LENGTH_FIELD]); if calculated_length_crc != length_crc { - return Err(BasicError::Corrupted { + return Err(Error::Corrupted { #[cfg(feature = "_test")] backtrace: std::backtrace::Backtrace::capture(), }); @@ -104,14 +104,14 @@ impl ItemHeader { data_buffer: &'d mut [u8], address: u32, end_address: u32, - ) -> Result, BasicError> { + ) -> Result, Error< S::Error>> { match self.crc { None => Ok(MaybeItem::Erased(self, data_buffer)), Some(header_crc) => { let data_address = ItemHeader::data_address::(address); let read_len = round_up_to_alignment_usize::(self.length as usize); if data_buffer.len() < read_len { - return Err(BasicError::BufferTooSmall(read_len)); + return Err(Error::BufferTooSmall(read_len)); } if data_address + read_len as u32 > end_address { return Ok(MaybeItem::Corrupted(self, data_buffer)); @@ -120,7 +120,7 @@ impl ItemHeader { flash .read(data_address, &mut data_buffer[..read_len]) .await - .map_err(|e| BasicError::Storage { + .map_err(|e| Error::Storage { value: e, #[cfg(feature = "_test")] backtrace: std::backtrace::Backtrace::capture(), @@ -145,7 +145,7 @@ impl ItemHeader { &self, flash: &mut S, address: u32, - ) -> Result<(), BasicError> { + ) -> Result<(), Error< S::Error>> { let mut buffer = AlignedBuf([0xFF; MAX_WORD_SIZE]); buffer[Self::DATA_CRC_FIELD] @@ -160,7 +160,7 @@ impl ItemHeader { &buffer[..round_up_to_alignment_usize::(Self::LENGTH)], ) .await - .map_err(|e| BasicError::Storage { + .map_err(|e| Error::Storage { value: e, #[cfg(feature = "_test")] backtrace: std::backtrace::Backtrace::capture(), @@ -174,7 +174,7 @@ impl ItemHeader { flash_range: Range, cache: &mut impl PrivateCacheImpl, address: u32, - ) -> Result> { + ) -> Result> { self.crc = None; cache.notice_item_erased::(flash_range.clone(), address, &self); self.write(flash, address).await?; @@ -223,7 +223,7 @@ impl<'d> Item<'d> { cache: &mut impl PrivateCacheImpl, address: u32, data: &'d [u8], - ) -> Result> { + ) -> Result> { let header = ItemHeader { length: data.len() as u16, crc: Some(adapted_crc32(data)), @@ -241,7 +241,7 @@ impl<'d> Item<'d> { header: &ItemHeader, data: &[u8], address: u32, - ) -> Result<(), BasicError> { + ) -> Result<(), Error< S::Error>> { cache.notice_item_written::(flash_range, address, header); header.write(flash, address).await?; @@ -251,7 +251,7 @@ impl<'d> Item<'d> { flash .write(data_address, data_block) .await - 
.map_err(|e| BasicError::Storage { + .map_err(|e| Error::Storage { value: e, #[cfg(feature = "_test")] backtrace: std::backtrace::Backtrace::capture(), @@ -266,7 +266,7 @@ impl<'d> Item<'d> { &buffer[..round_up_to_alignment_usize::(data_left.len())], ) .await - .map_err(|e| BasicError::Storage { + .map_err(|e| Error::Storage { value: e, #[cfg(feature = "_test")] backtrace: std::backtrace::Backtrace::capture(), @@ -282,7 +282,7 @@ impl<'d> Item<'d> { flash_range: Range, cache: &mut impl PrivateCacheImpl, address: u32, - ) -> Result<(), BasicError> { + ) -> Result<(), Error< S::Error>> { Self::write_raw( flash, flash_range, @@ -340,7 +340,7 @@ pub async fn find_next_free_item_spot( start_address: u32, end_address: u32, data_length: u32, -) -> Result, BasicError> { +) -> Result, Error< S::Error>> { let page_index = calculate_page_index::(flash_range, start_address); let free_item_address = match cache.first_item_after_written(page_index) { @@ -392,9 +392,9 @@ impl<'d> core::fmt::Debug for MaybeItem<'d> { } impl<'d> MaybeItem<'d> { - pub fn unwrap(self) -> Result, BasicError> { + pub fn unwrap(self) -> Result, Error< E>> { match self { - MaybeItem::Corrupted(_, _) => Err(BasicError::Corrupted { + MaybeItem::Corrupted(_, _) => Err(Error::Corrupted { #[cfg(feature = "_test")] backtrace: std::backtrace::Backtrace::capture(), }), @@ -476,7 +476,7 @@ pub async fn is_page_empty( cache: &mut impl PrivateCacheImpl, page_index: usize, page_state: Option, -) -> Result> { +) -> Result> { let page_state = match page_state { Some(page_state) => page_state, None => get_page_state::(flash, flash_range.clone(), cache, page_index).await?, @@ -521,7 +521,7 @@ impl ItemIter { &mut self, flash: &mut S, data_buffer: &'m mut [u8], - ) -> Result, u32)>, BasicError> { + ) -> Result, u32)>, Error< S::Error>> { let mut data_buffer = Some(data_buffer); while let (Some(header), address) = self.header.next(flash).await? { let buffer = data_buffer.take().unwrap(); @@ -558,7 +558,7 @@ impl ItemHeaderIter { pub async fn next( &mut self, flash: &mut S, - ) -> Result<(Option, u32), BasicError> { + ) -> Result<(Option, u32), Error< S::Error>> { self.traverse(flash, |_, _| false).await } @@ -570,7 +570,7 @@ impl ItemHeaderIter { &mut self, flash: &mut S, callback: impl Fn(&ItemHeader, u32) -> bool, - ) -> Result<(Option, u32), BasicError> { + ) -> Result<(Option, u32), Error< S::Error>> { loop { match ItemHeader::read_new(flash, self.current_address, self.end_address).await { Ok(Some(header)) => { @@ -586,7 +586,7 @@ impl ItemHeaderIter { Ok(None) => { return Ok((None, self.current_address)); } - Err(BasicError::Corrupted { .. }) => { + Err(Error::Corrupted { .. }) => { self.current_address = ItemHeader::data_address::(self.current_address); } Err(e) => return Err(e), diff --git a/src/lib.rs b/src/lib.rs index c65ebba..0e6e357 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -8,12 +8,12 @@ // - flash write size is quite small, so it writes words and not full pages use cache::PrivateCacheImpl; -use map::KeyError; use core::{ fmt::Debug, ops::{Deref, DerefMut, Range}, }; use embedded_storage_async::nor_flash::NorFlash; +use map::MapItemError; pub mod cache; mod item; @@ -50,13 +50,13 @@ async fn try_general_repair( flash: &mut S, flash_range: Range, cache: &mut impl PrivateCacheImpl, -) -> Result<(), BasicError> { +) -> Result<(), Error< S::Error>> { // Loop through the pages and get their state. If one returns the corrupted error, // the page is likely half-erased. Fix for that is to re-erase again to hopefully finish the job. 
for page_index in get_pages::(flash_range.clone(), 0) { if matches!( get_page_state(flash, flash_range.clone(), cache, page_index).await, - Err(BasicError::Corrupted { .. }) + Err(Error::Corrupted { .. }) ) { open_page(flash, flash_range.clone(), cache, page_index).await?; } @@ -74,7 +74,7 @@ async fn find_first_page( cache: &mut impl PrivateCacheImpl, starting_page_index: usize, page_state: PageState, -) -> Result, BasicError> { +) -> Result, Error< S::Error>> { for page_index in get_pages::(flash_range.clone(), starting_page_index) { if page_state == get_page_state::(flash, flash_range.clone(), cache, page_index).await? { return Ok(Some(page_index)); @@ -138,7 +138,7 @@ async fn get_page_state( flash_range: Range, cache: &mut impl PrivateCacheImpl, page_index: usize, -) -> Result> { +) -> Result> { if let Some(cached_page_state) = cache.get_page_state(page_index) { return Ok(cached_page_state); } @@ -153,7 +153,7 @@ async fn get_page_state( flash .read(page_address, &mut buffer[..S::READ_SIZE]) .await - .map_err(|e| BasicError::Storage { + .map_err(|e| Error::Storage { value: e, #[cfg(feature = "_test")] backtrace: std::backtrace::Backtrace::capture(), @@ -170,7 +170,7 @@ async fn get_page_state( &mut buffer[..S::READ_SIZE], ) .await - .map_err(|e| BasicError::Storage { + .map_err(|e| Error::Storage { value: e, #[cfg(feature = "_test")] backtrace: std::backtrace::Backtrace::capture(), @@ -186,7 +186,7 @@ async fn get_page_state( (true, false) => PageState::PartialOpen, // Probably an interrupted erase (false, true) => { - return Err(BasicError::Corrupted { + return Err(Error::Corrupted { #[cfg(feature = "_test")] backtrace: std::backtrace::Backtrace::capture(), }) @@ -206,7 +206,7 @@ async fn open_page( flash_range: Range, cache: &mut impl PrivateCacheImpl, page_index: usize, -) -> Result<(), BasicError> { +) -> Result<(), Error< S::Error>> { cache.notice_page_state(page_index, PageState::Open, true); flash @@ -215,7 +215,7 @@ async fn open_page( calculate_page_end_address::(flash_range.clone(), page_index), ) .await - .map_err(|e| BasicError::Storage { + .map_err(|e| Error::Storage { value: e, #[cfg(feature = "_test")] backtrace: std::backtrace::Backtrace::capture(), @@ -230,7 +230,7 @@ async fn close_page( flash_range: Range, cache: &mut impl PrivateCacheImpl, page_index: usize, -) -> Result<(), BasicError> { +) -> Result<(), Error< S::Error>> { let current_state = partial_close_page::(flash, flash_range.clone(), cache, page_index).await?; @@ -248,7 +248,7 @@ async fn close_page( &buffer[..S::WORD_SIZE], ) .await - .map_err(|e| BasicError::Storage { + .map_err(|e| Error::Storage { value: e, #[cfg(feature = "_test")] backtrace: std::backtrace::Backtrace::capture(), @@ -263,7 +263,7 @@ async fn partial_close_page( flash_range: Range, cache: &mut impl PrivateCacheImpl, page_index: usize, -) -> Result> { +) -> Result> { let current_state = get_page_state::(flash, flash_range.clone(), cache, page_index).await?; if current_state != PageState::Open { @@ -286,7 +286,7 @@ async fn partial_close_page( &buffer[..S::WORD_SIZE], ) .await - .map_err(|e| BasicError::Storage { + .map_err(|e| Error::Storage { value: e, #[cfg(feature = "_test")] backtrace: std::backtrace::Backtrace::capture(), @@ -333,39 +333,11 @@ impl PageState { } } -#[non_exhaustive] -#[derive(Debug)] -#[cfg_attr(feature = "defmt-03", derive(defmt::Format))] -enum BasicError { - /// An error in the storage (flash) - Storage { - /// The error value - value: S, - #[cfg(feature = "_test")] - /// Backtrace made at the construction of 
the error - backtrace: std::backtrace::Backtrace, - }, - /// The item cannot be stored anymore because the storage is full. - /// If you get this error some data may be lost. - FullStorage, - /// It's been detected that the memory is likely corrupted. - /// You may want to erase the memory to recover. - Corrupted { - #[cfg(feature = "_test")] - /// Backtrace made at the construction of the error - backtrace: std::backtrace::Backtrace, - }, - /// A provided buffer was to big to be used - BufferTooBig, - /// A provided buffer was to small to be used (usize is size needed) - BufferTooSmall(usize), -} - /// The main error type #[non_exhaustive] #[derive(Debug)] #[cfg_attr(feature = "defmt-03", derive(defmt::Format))] -pub enum Error { +pub enum Error { /// An error in the storage (flash) Storage { /// The error value @@ -389,20 +361,12 @@ pub enum Error { /// A provided buffer was to small to be used (usize is size needed) BufferTooSmall(usize), /// A storage item error - Item(I), - KeyError(KeyError), + Item(MapItemError), } -impl From for Error { - fn from(v: KeyError) -> Self { - Self::KeyError(v) - } -} - -impl PartialEq for Error { +impl PartialEq for Error { fn eq(&self, other: &Self) -> bool { match (self, other) { - (Self::Item(l0), Self::Item(r0)) => l0 == r0, (Self::Storage { value: l_value, .. }, Self::Storage { value: r_value, .. }) => { l_value == r_value } @@ -412,35 +376,8 @@ impl PartialEq for Error { } } -impl From> for Error { - fn from(value: BasicError) -> Self { - match value { - BasicError::Storage { - value, - #[cfg(feature = "_test")] - backtrace, - } => Self::Storage { - value, - #[cfg(feature = "_test")] - backtrace, - }, - BasicError::FullStorage => Self::FullStorage, - BasicError::Corrupted { - #[cfg(feature = "_test")] - backtrace, - } => Self::Corrupted { - #[cfg(feature = "_test")] - backtrace, - }, - BasicError::BufferTooBig => Self::BufferTooBig, - BasicError::BufferTooSmall(needed) => Self::BufferTooSmall(needed), - } - } -} - -impl core::fmt::Display for Error +impl core::fmt::Display for Error where - I: core::fmt::Display, S: core::fmt::Display, { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { @@ -454,7 +391,6 @@ where "A provided buffer was to small to be used. Needed was {needed}" ), Error::Item(value) => write!(f, "Item error: {value}"), - Error::KeyError(value) => write!(f, "Key error: {value:?}"), } } } diff --git a/src/map.rs b/src/map.rs index 5379748..290d573 100644 --- a/src/map.rs +++ b/src/map.rs @@ -2,13 +2,10 @@ //! //! When a key-value is stored, it overwrites the any old items with the same key. //! -//! Make sure to use the same [StorageItem] type on a given range in flash. -//! In theory you could use multiple types if you're careful, but they must at least have the same key definition and format. -//! //! ## Basic API: //! //! ```rust -//! # use sequential_storage::map::{store_item, fetch_item, StorageItem}; +//! # use sequential_storage::map::{store_item, fetch_item}; //! # use sequential_storage::cache::NoCache; //! # use mock_flash::MockFlashBase; //! # use futures::executor::block_on; @@ -19,50 +16,6 @@ //! # fn init_flash() -> Flash { //! # Flash::new(mock_flash::WriteCountCheck::Twice, None, false) //! # } -//! // We create the type we want to store in this part of flash. -//! // It itself must contain the key and the value. -//! // On this part of flash, we must only call the functions using this type. -//! // If you start to mix, bad things will happen. -//! -//! #[derive(Debug, PartialEq)] -//! 
struct MyCustomType { -//! key: u8, -//! data: u32, -//! } -//! -//! // We implement StorageItem for our type. This lets the crate -//! // know how to serialize and deserialize the data and get its key for comparison. -//! -//! impl StorageItem for MyCustomType { -//! type Key = u8; -//! type Error = Error; -//! -//! fn serialize_into(&self, buffer: &mut [u8]) -> Result { -//! if buffer.len() < 5 { -//! return Err(Error::BufferTooSmall); -//! } -//! -//! buffer[0] = self.key; -//! buffer[1..5].copy_from_slice(&self.data.to_le_bytes()); -//! -//! Ok(5) -//! } -//! fn deserialize_from(buffer: &[u8]) -> Result { -//! // We don't have to check the length. This buffer has the same length -//! // as what we serialized, so in this case it's always 5. -//! -//! Ok(Self { -//! key: buffer[0], -//! data: u32::from_le_bytes(buffer[1..5].try_into().unwrap()), -//! }) -//! } -//! fn key(&self) -> Self::Key { self.key } -//! } -//! -//! #[derive(Debug)] -//! enum Error { -//! BufferTooSmall -//! } //! //! # block_on(async { //! // Initialize the flash. This can be internal or external @@ -80,7 +33,7 @@ //! // Nothing is stored in it yet, so it will return None. //! //! assert_eq!( -//! fetch_item::( +//! fetch_item::( //! &mut flash, //! flash_range.clone(), //! &mut NoCache::new(), @@ -92,38 +45,37 @@ //! //! // Now we store an item in flash with key 42 //! -//! store_item::( +//! store_item( //! &mut flash, //! flash_range.clone(), //! &mut NoCache::new(), //! &mut data_buffer, -//! &MyCustomType { key: 42, data: 104729 }, +//! 42u8, +//! &104729u32, //! ).await.unwrap(); //! //! // When we ask for key 42, we now get back a Some with the correct value //! //! assert_eq!( -//! fetch_item::( +//! fetch_item::( //! &mut flash, //! flash_range.clone(), //! &mut NoCache::new(), //! &mut data_buffer, //! 42, //! ).await.unwrap(), -//! Some(MyCustomType { key: 42, data: 104729 }) +//! Some(104729) //! ); //! # }); //! ``` -use core::convert::Infallible; - use embedded_storage_async::nor_flash::MultiwriteNorFlash; use crate::item::{find_next_free_item_spot, Item, ItemHeader, ItemIter}; use self::{ cache::{KeyCacheImpl, PrivateKeyCacheImpl}, - item::ItemHeaderIter, + item::{ItemHeaderIter, ItemUnborrowed}, }; use super::*; @@ -138,19 +90,15 @@ use super::*; /// /// *Note: On a given flash range, make sure to use only the same type as [StorageItem] every time /// or types that serialize and deserialize the key in the same way.* -pub async fn fetch_item( +pub async fn fetch_item<'d, K: Key, I: MapItem<'d>, S: NorFlash>( flash: &mut S, flash_range: Range, - cache: &mut impl KeyCacheImpl, - data_buffer: &mut [u8], - search_key: I::Key, -) -> Result, Error> { - run_with_auto_repair!( + cache: &mut impl KeyCacheImpl, + data_buffer: &'d mut [u8], + search_key: K, +) -> Result, Error> { + let result = run_with_auto_repair!( function = { - if cache.is_dirty() { - cache.invalidate_cache_state(); - } - fetch_item_with_location( flash, flash_range.clone(), @@ -159,10 +107,22 @@ search_key.clone(), ) .await - .map(|value| value.map(|(item, _, _)| item)) }, - repair = try_repair::(flash, flash_range.clone(), cache, data_buffer).await? - ) + }, + repair = try_repair::(flash, flash_range.clone(), cache, data_buffer).await? + ); + + let Some((item, _)) = result?
else { + return Ok(None); + }; + + let item = item.reborrow(data_buffer); + + let data_len = item.data().len(); + + Ok(Some( + I::deserialize_from(&item.destruct().1[K::LEN..][..data_len - K::LEN]) + .map_err(Error::Item)?, + )) } /// Fetch the item, but with the address and header @@ -173,7 +133,7 @@ async fn fetch_item_with_location<'d, K: Key, S: NorFlash>( cache: &mut impl PrivateKeyCacheImpl, data_buffer: &'d mut [u8], search_key: K, -) -> Result, u32)>, Error> { +) -> Result, Error> { assert_eq!(flash_range.start % S::ERASE_SIZE as u32, 0); assert_eq!(flash_range.end % S::ERASE_SIZE as u32, 0); assert!(flash_range.end - flash_range.start >= S::ERASE_SIZE as u32 * 2); @@ -181,6 +141,10 @@ async fn fetch_item_with_location<'d, K: Key, S: NorFlash>( assert!(S::ERASE_SIZE >= S::WORD_SIZE * 3); assert!(S::WORD_SIZE <= MAX_WORD_SIZE); + if cache.is_dirty() { + cache.invalidate_cache_state(); + } + 'cache: { if let Some(cached_location) = cache.key_location(&search_key) { let page_index = calculate_page_index::(flash_range.clone(), cached_location); @@ -214,7 +178,7 @@ async fn fetch_item_with_location<'d, K: Key, S: NorFlash>( break 'cache; } item::MaybeItem::Present(item) => { - return Ok(Some((item, cached_location))); + return Ok(Some((item.unborrow(), cached_location))); } } } @@ -269,7 +233,7 @@ async fn fetch_item_with_location<'d, K: Key, S: NorFlash>( let mut it = ItemIter::new(page_data_start_address, page_data_end_address); while let Some((item, address)) = it.next(flash, data_buffer).await? { - if K::deserialize_from(item.data())?.0 == search_key { + if K::deserialize_from(&item.data()[..K::LEN]) == search_key { newest_found_item_address = Some(address); } } @@ -312,7 +276,7 @@ async fn fetch_item_with_location<'d, K: Key, S: NorFlash>( .read_item(flash, data_buffer, newest_found_item_address, u32::MAX) .await?; - Ok(Some((item.unwrap()?, newest_found_item_address))) + Ok(Some((item.unwrap()?.unborrow(), newest_found_item_address))) } else { Ok(None) } @@ -326,14 +290,14 @@ async fn fetch_item_with_location<'d, K: Key, S: NorFlash>( /// /// *Note: On a given flash range, make sure to use only the same type as [StorageItem] every time /// or types that serialize and deserialize the key in the same way.* -pub async fn store_item( +pub async fn store_item<'d, K: Key, I: MapItem<'d>, S: NorFlash>( flash: &mut S, flash_range: Range, - cache: &mut impl KeyCacheImpl, + cache: &mut impl KeyCacheImpl, data_buffer: &mut [u8], key: K, item: &I, -) -> Result<(), Error> { +) -> Result<(), Error> { run_with_auto_repair!( function = store_item_inner( flash, @@ -344,18 +308,18 @@ pub async fn store_item( item ) .await, - repair = try_repair::(flash, flash_range.clone(), cache, data_buffer).await? + repair = try_repair::(flash, flash_range.clone(), cache, data_buffer).await? 
) } -async fn store_item_inner( +async fn store_item_inner<'d, K: Key, S: NorFlash>( flash: &mut S, flash_range: Range, - cache: &mut impl KeyCacheImpl, + cache: &mut impl KeyCacheImpl, data_buffer: &mut [u8], key: K, - item: &I, -) -> Result<(), Error> { + item: &dyn MapItem<'d>, +) -> Result<(), Error> { assert_eq!(flash_range.start % S::ERASE_SIZE as u32, 0); assert_eq!(flash_range.end % S::ERASE_SIZE as u32, 0); @@ -409,8 +373,11 @@ async fn store_item_inner( calculate_page_end_address::(flash_range.clone(), partial_open_page) - S::WORD_SIZE as u32; - todo!("Serialize key into the buffer here too!"); - let item_data_length = item.serialize_into(data_buffer).map_err(Error::Item)?; + key.serialize_into(&mut data_buffer[..K::LEN]); + let item_data_length = K::LEN + + item + .serialize_into(&mut data_buffer[K::LEN..]) + .map_err(Error::Item)?; let free_spot_address = find_next_free_item_spot( flash, @@ -424,7 +391,7 @@ async fn store_item_inner( match free_spot_address { Some(free_spot_address) => { - cache.notice_key_location(item.key(), free_spot_address, true); + cache.notice_key_location(key.clone(), free_spot_address, true); Item::write_new( flash, flash_range.clone(), @@ -528,7 +495,7 @@ pub async fn remove_item( cache: &mut impl KeyCacheImpl, data_buffer: &mut [u8], search_key: K, -) -> Result<(), Error> { +) -> Result<(), Error> { run_with_auto_repair!( function = remove_item_inner::( flash, @@ -548,7 +515,7 @@ async fn remove_item_inner( cache: &mut impl KeyCacheImpl, data_buffer: &mut [u8], search_key: K, -) -> Result<(), Error> { +) -> Result<(), Error> { cache.notice_key_erased(&search_key); // Search for the last used page. We're gonna erase from the one after this first. @@ -589,7 +556,7 @@ async fn remove_item_inner( item::MaybeItem::Corrupted(_, _) => continue, item::MaybeItem::Erased(_, _) => continue, item::MaybeItem::Present(item) => { - let (item_key, _) = K::deserialize_from(item.data())?; + let item_key = K::deserialize_from(&item.data()[..K::LEN]); // If this item has the same key as the key we're trying to erase, then erase the item. // But keep going! We need to erase everything. @@ -610,26 +577,143 @@ async fn remove_item_inner( } pub trait Key: Eq + Clone + Sized { - fn serialize_into(&self, buffer: &mut [u8]) -> Result; - fn deserialize_from(buffer: &[u8]) -> Result<(Self, usize), KeyError>; + const LEN: usize; + + fn serialize_into(&self, buffer: &mut [u8]); + fn deserialize_from(buffer: &[u8]) -> Self; +} + +macro_rules! 
impl_key_num { + ($int:ty) => { + impl Key for $int { + const LEN: usize = core::mem::size_of::(); + + fn serialize_into(&self, buffer: &mut [u8]) { + buffer.copy_from_slice(&self.to_le_bytes()); + } + + fn deserialize_from(buffer: &[u8]) -> Self { + Self::from_le_bytes(buffer.try_into().unwrap()) + } + } + }; +} + +impl_key_num!(u8); +impl_key_num!(u16); +impl_key_num!(u32); +impl_key_num!(u64); +impl_key_num!(u128); +impl_key_num!(i8); +impl_key_num!(i16); +impl_key_num!(i32); +impl_key_num!(i64); +impl_key_num!(i128); + +impl Key for [u8; N] { + const LEN: usize = N; + + fn serialize_into(&self, buffer: &mut [u8]) { + buffer.copy_from_slice(self); + } + + fn deserialize_from(buffer: &[u8]) -> Self { + buffer[..Self::LEN].try_into().unwrap() + } } -pub trait MapItem: Sized { - /// The error type for serialization and deserialization - type Error; +pub trait MapItem<'a> { + fn serialize_into(&self, buffer: &mut [u8]) -> Result; + fn deserialize_from(buffer: &'a [u8]) -> Result + where + Self: Sized; +} + +impl<'a> MapItem<'a> for &'a [u8] { + fn serialize_into(&self, buffer: &mut [u8]) -> Result { + if buffer.len() < self.len() { + return Err(MapItemError::BufferTooSmall); + } - fn serialize_into(&self, buffer: &mut [u8]) -> Result; - fn deserialize_from(buffer: &[u8]) -> Result; + buffer[..self.len()].copy_from_slice(self); + Ok(self.len()) + } + + fn deserialize_from(buffer: &'a [u8]) -> Result + where + Self: Sized, + { + Ok(buffer) + } } +impl<'a, const N: usize> MapItem<'a> for [u8; N] { + fn serialize_into(&self, buffer: &mut [u8]) -> Result { + if buffer.len() < self.len() { + return Err(MapItemError::BufferTooSmall); + } + + buffer[..self.len()].copy_from_slice(self); + Ok(self.len()) + } + + fn deserialize_from(buffer: &'a [u8]) -> Result + where + Self: Sized, + { + buffer.try_into().map_err(|_| MapItemError::BufferTooSmall) + } +} + +macro_rules! impl_map_item_num { + ($int:ty) => { + impl<'a> MapItem<'a> for $int { + fn serialize_into(&self, buffer: &mut [u8]) -> Result { + buffer[..core::mem::size_of::()].copy_from_slice(&self.to_le_bytes()); + Ok(core::mem::size_of::()) + } + + fn deserialize_from(buffer: &[u8]) -> Result { + Ok(Self::from_le_bytes( + buffer[..core::mem::size_of::()] + .try_into() + .map_err(|_| MapItemError::BufferTooSmall)?, + )) + } + } + }; +} + +impl_map_item_num!(u8); +impl_map_item_num!(u16); +impl_map_item_num!(u32); +impl_map_item_num!(u64); +impl_map_item_num!(u128); +impl_map_item_num!(i8); +impl_map_item_num!(i16); +impl_map_item_num!(i32); +impl_map_item_num!(i64); +impl_map_item_num!(i128); +impl_map_item_num!(f32); +impl_map_item_num!(f64); + #[non_exhaustive] -#[derive(Debug)] +#[derive(Debug, PartialEq, Eq, Clone)] #[cfg_attr(feature = "defmt-03", derive(defmt::Format))] -pub enum KeyError { +pub enum MapItemError { BufferTooSmall, InvalidFormat, } +impl core::fmt::Display for MapItemError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self { + MapItemError::BufferTooSmall => write!(f, "Buffer too small"), + MapItemError::InvalidFormat => write!(f, "Invalid format"), + } + } +} + async fn migrate_items( flash: &mut S, flash_range: Range, @@ -637,7 +721,7 @@ async fn migrate_items( data_buffer: &mut [u8], source_page: usize, target_page: usize, -) -> Result<(), Error> { +) -> Result<(), Error> { // We need to move the data from the next buffer page to the next_page_to_use, but only if that data // doesn't have a newer value somewhere else. 
@@ -649,7 +733,7 @@ async fn migrate_items( calculate_page_end_address::(flash_range.clone(), source_page) - S::WORD_SIZE as u32, ); while let Some((item, item_address)) = it.next(flash, data_buffer).await? { - let (key, _) = K::deserialize_from(item.data())?; + let key = K::deserialize_from(&item.data()[..K::LEN]); let (_, data_buffer) = item.destruct(); // Search for the newest item with the key we found @@ -669,6 +753,8 @@ async fn migrate_items( }); }; + let found_item = found_item.reborrow(data_buffer); + if found_address == item_address { cache.notice_key_location(key, next_page_write_address, true); found_item @@ -700,7 +786,7 @@ async fn try_repair( flash_range: Range, cache: &mut impl KeyCacheImpl, data_buffer: &mut [u8], -) -> Result<(), Error> { +) -> Result<(), Error> { cache.invalidate_cache_state(); crate::try_general_repair(flash, flash_range.clone(), cache).await?; @@ -744,74 +830,6 @@ mod tests { type MockFlashBig = mock_flash::MockFlashBase<4, 4, 256>; type MockFlashTiny = mock_flash::MockFlashBase<2, 1, 32>; - #[derive(Debug, PartialEq, Eq)] - struct MockStorageItem { - key: u8, - value: Vec, - } - - #[derive(Debug, PartialEq, Eq)] - enum MockStorageItemError { - BufferTooSmall, - InvalidKey, - BufferTooBig, - } - - impl StorageItem for MockStorageItem { - type Key = u8; - - type Error = MockStorageItemError; - - fn serialize_into(&self, buffer: &mut [u8]) -> Result { - if buffer.len() < 2 + self.value.len() { - return Err(MockStorageItemError::BufferTooSmall); - } - - if self.value.len() > 255 { - return Err(MockStorageItemError::BufferTooBig); - } - - // The serialized value must not be all 0xFF - if self.key == 0xFF { - return Err(MockStorageItemError::InvalidKey); - } - - buffer[0] = self.key; - buffer[1] = self.value.len() as u8; - buffer[2..][..self.value.len()].copy_from_slice(&self.value); - - Ok(2 + self.value.len()) - } - - fn deserialize_from(buffer: &[u8]) -> Result - where - Self: Sized, - { - if buffer.len() < 2 { - return Err(MockStorageItemError::BufferTooSmall); - } - - if buffer[0] == 0xFF { - return Err(MockStorageItemError::InvalidKey); - } - - let len = buffer[1]; - - if buffer.len() < 2 + len as usize { - return Err(MockStorageItemError::BufferTooSmall); - } - - Ok(Self { - key: buffer[0], - value: buffer[2..][..len as usize].to_vec(), - }) - } - - fn key(&self) -> Self::Key { - self.key - } - } - #[test] async fn store_and_fetch() { let mut flash = MockFlashBig::default(); @@ -821,7 +839,7 @@ mod tests { let start_snapshot = flash.stats_snapshot(); - let item = fetch_item::( + let item = fetch_item::( &mut flash, flash_range.clone(), &mut cache::NoCache::new(), @@ -832,7 +850,7 @@ mod tests { .unwrap(); assert_eq!(item, None); - let item = fetch_item::( + let item = fetch_item::( &mut flash, flash_range.clone(), &mut cache::NoCache::new(), @@ -843,7 +861,7 @@ mod tests { .unwrap(); assert_eq!(item, None); - let item = fetch_item::( + let item = fetch_item::( &mut flash, flash_range.clone(), &mut cache::NoCache::new(), @@ -854,32 +872,28 @@ mod tests { .unwrap(); assert_eq!(item, None); - store_item::<_, _>( + store_item( &mut flash, flash_range.clone(), &mut cache::NoCache::new(), &mut data_buffer, - &MockStorageItem { - key: 0, - value: vec![5], - }, + 0u8, + &[5], ) .await .unwrap(); - store_item::<_, _>( + store_item( &mut flash, flash_range.clone(), &mut cache::NoCache::new(), &mut data_buffer, - &MockStorageItem { - key: 0, - value: vec![5, 6], - }, + 0u8, + &[5, 6], ) .await .unwrap(); - let item = fetch_item::( + let item = fetch_item::( 
&mut flash, flash_range.clone(), &mut cache::NoCache::new(), @@ -889,23 +903,20 @@ mod tests { .await .unwrap() .unwrap(); - assert_eq!(item.key, 0); - assert_eq!(item.value, vec![5, 6]); + assert_eq!(item, &[5, 6]); - store_item::<_, _>( + store_item( &mut flash, flash_range.clone(), &mut cache::NoCache::new(), &mut data_buffer, - &MockStorageItem { - key: 1, - value: vec![2, 2, 2, 2, 2, 2], - }, + 1u8, + &[2, 2, 2, 2, 2, 2], ) .await .unwrap(); - let item = fetch_item::( + let item = fetch_item::( &mut flash, flash_range.clone(), &mut cache::NoCache::new(), @@ -915,10 +926,9 @@ mod tests { .await .unwrap() .unwrap(); - assert_eq!(item.key, 0); - assert_eq!(item.value, vec![5, 6]); + assert_eq!(item, &[5, 6]); - let item = fetch_item::( + let item = fetch_item::( &mut flash, flash_range.clone(), &mut cache::NoCache::new(), @@ -928,26 +938,23 @@ mod tests { .await .unwrap() .unwrap(); - assert_eq!(item.key, 1); - assert_eq!(item.value, vec![2, 2, 2, 2, 2, 2]); + assert_eq!(item, &[2, 2, 2, 2, 2, 2]); for index in 0..4000 { - store_item::<_, _>( + store_item( &mut flash, flash_range.clone(), &mut cache::NoCache::new(), &mut data_buffer, - &MockStorageItem { - key: (index % 10) as u8, - value: vec![(index % 10) as u8 * 2; index % 10], - }, + (index % 10) as u8, + &vec![(index % 10) as u8 * 2; index % 10].as_slice(), ) .await .unwrap(); } for i in 0..10 { - let item = fetch_item::( + let item = fetch_item::( &mut flash, flash_range.clone(), &mut cache::NoCache::new(), @@ -957,27 +964,24 @@ mod tests { .await .unwrap() .unwrap(); - assert_eq!(item.key, i); - assert_eq!(item.value, vec![(i % 10) * 2; (i % 10) as usize]); + assert_eq!(item, &vec![(i % 10) * 2; (i % 10) as usize]); } for _ in 0..4000 { - store_item::<_, _>( + store_item( &mut flash, flash_range.clone(), &mut cache::NoCache::new(), &mut data_buffer, - &MockStorageItem { - key: 11, - value: vec![0; 10], - }, + 11u8, + &[0; 10], ) .await .unwrap(); } for i in 0..10 { - let item = fetch_item::( + let item = fetch_item::( &mut flash, flash_range.clone(), &mut cache::NoCache::new(), @@ -987,8 +991,7 @@ mod tests { .await .unwrap() .unwrap(); - assert_eq!(item.key, i); - assert_eq!(item.value, vec![(i % 10) * 2; (i % 10) as usize]); + assert_eq!(item, &vec![(i % 10) * 2; (i % 10) as usize]); } println!("{:?}", start_snapshot.compare_to(flash.stats_snapshot()),); @@ -996,46 +999,41 @@ mod tests { #[test] async fn store_too_many_items() { - const UPPER_BOUND: u8 = 2; + const UPPER_BOUND: u8 = 3; let mut tiny_flash = MockFlashTiny::default(); let mut data_buffer = AlignedBuf([0; 128]); for i in 0..UPPER_BOUND { - let item = MockStorageItem { - key: i, - value: vec![i; i as usize], - }; - println!("Storing {item:?}"); + println!("Storing {i:?}"); - store_item::<_, _>( + store_item( &mut tiny_flash, 0x00..0x40, &mut cache::NoCache::new(), &mut data_buffer, - &item, + i, + &vec![i; i as usize].as_slice(), ) .await .unwrap(); } assert_eq!( - store_item::<_, _>( + store_item( &mut tiny_flash, 0x00..0x40, &mut cache::NoCache::new(), &mut data_buffer, - &MockStorageItem { - key: UPPER_BOUND, - value: vec![0; UPPER_BOUND as usize], - }, + UPPER_BOUND, + &vec![0; UPPER_BOUND as usize].as_slice(), ) .await, Err(Error::FullStorage) ); for i in 0..UPPER_BOUND { - let item = fetch_item::( + let item = fetch_item::( &mut tiny_flash, 0x00..0x40, &mut cache::NoCache::new(), @@ -1048,52 +1046,47 @@ mod tests { println!("Fetched {item:?}"); - assert_eq!(item.value, vec![i; i as usize]); + assert_eq!(item, vec![i; i as usize]); } } #[test] async fn 
store_too_many_items_big() { - const UPPER_BOUND: u8 = 67; + const UPPER_BOUND: u8 = 68; let mut big_flash = MockFlashBig::default(); let mut data_buffer = AlignedBuf([0; 128]); for i in 0..UPPER_BOUND { - let item = MockStorageItem { - key: i, - value: vec![i; i as usize], - }; - println!("Storing {item:?}"); + println!("Storing {i:?}"); - store_item::<_, _>( + store_item( &mut big_flash, 0x0000..0x1000, &mut cache::NoCache::new(), &mut data_buffer, - &item, + i, + &vec![i; i as usize].as_slice(), ) .await .unwrap(); } assert_eq!( - store_item::<_, _>( + store_item( &mut big_flash, 0x0000..0x1000, &mut cache::NoCache::new(), &mut data_buffer, - &MockStorageItem { - key: UPPER_BOUND, - value: vec![0; UPPER_BOUND as usize], - }, + UPPER_BOUND, + &vec![0; UPPER_BOUND as usize].as_slice(), ) .await, Err(Error::FullStorage) ); for i in 0..UPPER_BOUND { - let item = fetch_item::( + let item = fetch_item::( &mut big_flash, 0x0000..0x1000, &mut cache::NoCache::new(), @@ -1106,7 +1099,7 @@ mod tests { println!("Fetched {item:?}"); - assert_eq!(item.value, vec![i; i as usize]); + assert_eq!(item, vec![i; i as usize]); } } @@ -1121,17 +1114,13 @@ mod tests { for _ in 0..100 { for i in 0..24 { - let item = MockStorageItem { - key: i as u8, - value: vec![i as u8; LENGHT_PER_KEY[i]], - }; - - store_item::<_, _>( + store_item( &mut flash, 0x0000..0x4000, &mut cache::NoCache::new(), &mut data_buffer, - &item, + i as u16, + &vec![i as u8; LENGHT_PER_KEY[i]].as_slice(), ) .await .unwrap(); @@ -1139,12 +1128,12 @@ mod tests { } for i in 0..24 { - let item = fetch_item::( + let item = fetch_item::( &mut flash, 0x0000..0x4000, &mut cache::NoCache::new(), &mut data_buffer, - i as u8, + i as u16, ) .await .unwrap() @@ -1152,7 +1141,7 @@ mod tests { println!("Fetched {item:?}"); - assert_eq!(item.value, vec![i as u8; LENGHT_PER_KEY[i]]); + assert_eq!(item, vec![i as u8; LENGHT_PER_KEY[i]]); } } @@ -1169,17 +1158,13 @@ mod tests { // Add some data to flash for j in 0..10 { for i in 0..24 { - let item = MockStorageItem { - key: i as u8, - value: vec![i as u8; j + 2], - }; - - store_item::<_, _>( + store_item( &mut flash, FLASH_RANGE, &mut cache::NoCache::new(), &mut data_buffer, - &item, + i as u8, + &vec![i as u8; j + 2].as_slice(), ) .await .unwrap(); @@ -1189,7 +1174,7 @@ mod tests { for j in (0..24).rev() { // Are all things still in flash that we expect? for i in 0..=j { - assert!(fetch_item::( + assert!(fetch_item::( &mut flash, FLASH_RANGE, &mut cache::NoCache::new(), @@ -1202,7 +1187,7 @@ mod tests { } // Remove the item - remove_item::( + remove_item::( &mut flash, FLASH_RANGE, &mut cache::NoCache::new(), @@ -1214,7 +1199,7 @@ mod tests { // Are all things still in flash that we expect? for i in 0..j { - assert!(fetch_item::( + assert!(fetch_item::( &mut flash, FLASH_RANGE, &mut cache::NoCache::new(), @@ -1226,7 +1211,7 @@ mod tests { .is_some()); } - assert!(fetch_item::( + assert!(fetch_item::( &mut flash, FLASH_RANGE, &mut cache::NoCache::new(), diff --git a/src/queue.rs b/src/queue.rs index 8ca0466..516e131 100644 --- a/src/queue.rs +++ b/src/queue.rs @@ -55,8 +55,6 @@ //! # }); //! 
``` -use core::convert::Infallible; - use crate::item::{find_next_free_item_spot, is_page_empty, Item, ItemHeader, ItemHeaderIter}; use self::{cache::CacheImpl, item::ItemUnborrowed}; @@ -78,7 +76,7 @@ pub async fn push( cache: &mut impl CacheImpl, data: &[u8], allow_overwrite_old_data: bool, -) -> Result<(), Error> { +) -> Result<(), Error< S::Error>> { run_with_auto_repair!( function = push_inner( flash, @@ -98,7 +96,7 @@ async fn push_inner( cache: &mut impl CacheImpl, data: &[u8], allow_overwrite_old_data: bool, -) -> Result<(), Error> { +) -> Result<(), Error< S::Error>> { assert_eq!(flash_range.start % S::ERASE_SIZE as u32, 0); assert_eq!(flash_range.end % S::ERASE_SIZE as u32, 0); @@ -115,7 +113,7 @@ async fn push_inner( as usize { cache.unmark_dirty(); - return Err(BasicError::BufferTooBig.into()); + return Err(Error::BufferTooBig); } let current_page = find_youngest_page(flash, flash_range.clone(), cache).await?; @@ -161,7 +159,7 @@ async fn push_inner( .await? { cache.unmark_dirty(); - return Err(BasicError::FullStorage.into()); + return Err(Error::FullStorage); } open_page(flash, flash_range.clone(), cache, next_page).await?; @@ -201,7 +199,7 @@ pub async fn peek_many<'d, S: NorFlash, CI: CacheImpl>( flash: &'d mut S, flash_range: Range, cache: &'d mut CI, -) -> Result, Error> { +) -> Result, Error< S::Error>> { // Note: Corruption repair is done in these functions already Ok(PeekIterator { iter: QueueIterator::new(flash, flash_range, cache).await?, @@ -223,7 +221,7 @@ pub async fn peek<'d, S: NorFlash>( flash_range: Range, cache: &mut impl CacheImpl, data_buffer: &'d mut [u8], -) -> Result, Error> { +) -> Result, Error< S::Error>> { // Note: Corruption repair is done in these functions already peek_many(flash, flash_range, cache) .await? @@ -240,7 +238,7 @@ pub async fn pop_many<'d, S: MultiwriteNorFlash, CI: CacheImpl>( flash: &'d mut S, flash_range: Range, cache: &'d mut CI, -) -> Result, Error> { +) -> Result, Error< S::Error>> { // Note: Corruption repair is done in these functions already Ok(PopIterator { iter: QueueIterator::new(flash, flash_range, cache).await?, @@ -262,7 +260,7 @@ pub async fn pop<'d, S: MultiwriteNorFlash>( flash_range: Range, cache: &mut impl CacheImpl, data_buffer: &'d mut [u8], -) -> Result, Error> { +) -> Result, Error< S::Error>> { pop_many(flash, flash_range, cache) .await? 
.next(data_buffer) @@ -287,7 +285,7 @@ impl<'d, S: MultiwriteNorFlash, CI: CacheImpl> PopIterator<'d, S, CI> { pub async fn next<'m>( &mut self, data_buffer: &'m mut [u8], - ) -> Result, Error> { + ) -> Result, Error< S::Error>> { let value = run_with_auto_repair!( function = self.next_inner(data_buffer).await, repair = try_repair( @@ -304,7 +302,7 @@ impl<'d, S: MultiwriteNorFlash, CI: CacheImpl> PopIterator<'d, S, CI> { async fn next_inner( &mut self, data_buffer: &mut [u8], - ) -> Result, Error> { + ) -> Result, Error< S::Error>> { if self.iter.cache.is_dirty() { self.iter.cache.invalidate_cache_state(); } @@ -330,7 +328,7 @@ impl<'d, S: MultiwriteNorFlash, CI: CacheImpl> PopIterator<'d, S, CI> { } Err(e) => { self.iter.recover_from_reset_point(reset_point); - Err(e.into()) + Err(e) } } } else { @@ -358,7 +356,7 @@ impl<'d, S: NorFlash, CI: CacheImpl> PeekIterator<'d, S, CI> { pub async fn next<'m>( &mut self, data_buffer: &'m mut [u8], - ) -> Result, Error> { + ) -> Result, Error< S::Error>> { // Note: Corruption repair is done in these functions already if self.iter.cache.is_dirty() { @@ -399,11 +397,9 @@ impl<'d, S: NorFlash, CI: CacheImpl> QueueIterator<'d, S, CI> { flash: &'d mut S, flash_range: Range, cache: &'d mut CI, - ) -> Result> { + ) -> Result> { let start_address = run_with_auto_repair!( - function = Self::find_start_address(flash, flash_range.clone(), cache) - .await - .map_err(|e| e.into()), + function = Self::find_start_address(flash, flash_range.clone(), cache).await, repair = try_repair(flash, flash_range.clone(), cache).await? )?; @@ -419,7 +415,7 @@ impl<'d, S: NorFlash, CI: CacheImpl> QueueIterator<'d, S, CI> { flash: &mut S, flash_range: Range, cache: &mut CI, - ) -> Result> { + ) -> Result> { assert_eq!(flash_range.start % S::ERASE_SIZE as u32, 0); assert_eq!(flash_range.end % S::ERASE_SIZE as u32, 0); @@ -446,9 +442,9 @@ impl<'d, S: NorFlash, CI: CacheImpl> QueueIterator<'d, S, CI> { async fn next<'m>( &mut self, data_buffer: &'m mut [u8], - ) -> Result, u32)>, Error> { + ) -> Result, u32)>, Error< S::Error>> { let value = run_with_auto_repair!( - function = self.next_inner(data_buffer).await.map_err(|e| e.into()), + function = self.next_inner(data_buffer).await, repair = try_repair(self.flash, self.flash_range.clone(), self.cache).await? ); @@ -458,7 +454,7 @@ impl<'d, S: NorFlash, CI: CacheImpl> QueueIterator<'d, S, CI> { async fn next_inner( &mut self, data_buffer: &mut [u8], - ) -> Result, BasicError> { + ) -> Result, Error< S::Error>> { let mut data_buffer = Some(data_buffer); if self.cache.is_dirty() { @@ -574,7 +570,7 @@ pub async fn find_max_fit( flash: &mut S, flash_range: Range, cache: &mut impl CacheImpl, -) -> Result, Error> { +) -> Result, Error< S::Error>> { run_with_auto_repair!( function = find_max_fit_inner(flash, flash_range.clone(), cache).await, repair = try_repair(flash, flash_range.clone(), cache).await? @@ -585,7 +581,7 @@ async fn find_max_fit_inner( flash: &mut S, flash_range: Range, cache: &mut impl CacheImpl, -) -> Result, Error> { +) -> Result, Error< S::Error>> { assert_eq!(flash_range.start % S::ERASE_SIZE as u32, 0); assert_eq!(flash_range.end % S::ERASE_SIZE as u32, 0); @@ -651,7 +647,7 @@ async fn find_youngest_page( flash: &mut S, flash_range: Range, cache: &mut impl PrivateCacheImpl, -) -> Result> { +) -> Result> { let last_used_page = find_first_page(flash, flash_range.clone(), cache, 0, PageState::PartialOpen).await?; @@ -667,7 +663,7 @@ async fn find_youngest_page( } // All pages are closed... This is not correct. 
- Err(BasicError::Corrupted { + Err(Error::Corrupted { #[cfg(feature = "_test")] backtrace: std::backtrace::Backtrace::capture(), }) @@ -677,7 +673,7 @@ async fn find_oldest_page( flash: &mut S, flash_range: Range, cache: &mut impl PrivateCacheImpl, -) -> Result> { +) -> Result> { let youngest_page = find_youngest_page(flash, flash_range.clone(), cache).await?; // The oldest page is the first non-open page after the youngest page @@ -701,7 +697,7 @@ async fn try_repair( flash: &mut S, flash_range: Range, cache: &mut impl CacheImpl, -) -> Result<(), BasicError> { +) -> Result<(), Error< S::Error>> { cache.invalidate_cache_state(); crate::try_general_repair(flash, flash_range.clone(), cache).await?; From 95d39f6c3c024fcc8776085e8d19aacd2b59450f Mon Sep 17 00:00:00 2001 From: Dion Dokter Date: Tue, 26 Mar 2024 13:31:21 +0100 Subject: [PATCH 04/10] Reenable cache tests --- src/cache/tests.rs | 535 +++++++++++++++++++-------------------------- 1 file changed, 231 insertions(+), 304 deletions(-) diff --git a/src/cache/tests.rs b/src/cache/tests.rs index a2be55f..729114d 100644 --- a/src/cache/tests.rs +++ b/src/cache/tests.rs @@ -1,304 +1,231 @@ -// #[cfg(test)] -// mod queue_tests { -// use core::ops::Range; - -// use crate::{ -// cache::{CacheImpl, NoCache, PagePointerCache, PageStateCache}, -// mock_flash::{self, FlashStatsResult, WriteCountCheck}, -// queue::{peek, pop, push}, -// AlignedBuf, -// }; - -// use futures_test::test; - -// const NUM_PAGES: usize = 4; -// const LOOP_COUNT: usize = 2000; - -// #[test] -// async fn no_cache() { -// assert_eq!( -// run_test(&mut NoCache::new()).await, -// FlashStatsResult { -// erases: 146, -// reads: 594934, -// writes: 6299, -// bytes_read: 2766058, -// bytes_written: 53299 -// } -// ); -// } - -// #[test] -// async fn page_state_cache() { -// assert_eq!( -// run_test(&mut PageStateCache::::new()).await, -// FlashStatsResult { -// erases: 146, -// reads: 308740, -// writes: 6299, -// bytes_read: 2479864, -// bytes_written: 53299 -// } -// ); -// } - -// #[test] -// async fn page_pointer_cache() { -// assert_eq!( -// run_test(&mut PagePointerCache::::new()).await, -// FlashStatsResult { -// erases: 146, -// reads: 211172, -// writes: 6299, -// bytes_read: 1699320, -// bytes_written: 53299 -// } -// ); -// } - -// async fn run_test(cache: &mut impl CacheImpl) -> FlashStatsResult { -// let mut flash = -// mock_flash::MockFlashBase::::new(WriteCountCheck::Twice, None, true); -// const FLASH_RANGE: Range = 0x00..0x400; -// let mut data_buffer = AlignedBuf([0; 1024]); - -// let start_snapshot = flash.stats_snapshot(); - -// for i in 0..LOOP_COUNT { -// println!("{i}"); -// let data = vec![i as u8; i % 20 + 1]; - -// println!("PUSH"); -// push(&mut flash, FLASH_RANGE, cache, &data, true) -// .await -// .unwrap(); -// assert_eq!( -// peek(&mut flash, FLASH_RANGE, cache, &mut data_buffer) -// .await -// .unwrap() -// .unwrap(), -// &data, -// "At {i}" -// ); -// println!("POP"); -// assert_eq!( -// pop(&mut flash, FLASH_RANGE, cache, &mut data_buffer) -// .await -// .unwrap() -// .unwrap(), -// &data, -// "At {i}" -// ); -// println!("PEEK"); -// assert_eq!( -// peek(&mut flash, FLASH_RANGE, cache, &mut data_buffer) -// .await -// .unwrap(), -// None, -// "At {i}" -// ); -// println!("DONE"); -// } - -// start_snapshot.compare_to(flash.stats_snapshot()) -// } -// } - -// #[cfg(test)] -// mod map_tests { -// use core::ops::Range; - -// use crate::{ -// cache::{KeyCacheImpl, KeyPointerCache, NoCache, PagePointerCache, PageStateCache}, -// map::{fetch_item, 
store_item}, -// mock_flash::{self, FlashStatsResult, WriteCountCheck}, -// AlignedBuf, -// }; - -// use futures_test::test; - -// const NUM_PAGES: usize = 4; - -// #[test] -// async fn no_cache() { -// assert_eq!( -// run_test(&mut NoCache::new()).await, -// FlashStatsResult { -// erases: 198, -// reads: 224161, -// writes: 5201, -// bytes_read: 1770974, -// bytes_written: 50401 -// } -// ); -// } - -// #[test] -// async fn page_state_cache() { -// assert_eq!( -// run_test(&mut PageStateCache::::new()).await, -// FlashStatsResult { -// erases: 198, -// reads: 171543, -// writes: 5201, -// bytes_read: 1718356, -// bytes_written: 50401 -// } -// ); -// } - -// #[test] -// async fn page_pointer_cache() { -// assert_eq!( -// run_test(&mut PagePointerCache::::new()).await, -// FlashStatsResult { -// erases: 198, -// reads: 153667, -// writes: 5201, -// bytes_read: 1575348, -// bytes_written: 50401 -// } -// ); -// } - -// #[test] -// async fn key_pointer_cache_half() { -// assert_eq!( -// run_test(&mut KeyPointerCache::::new()).await, -// FlashStatsResult { -// erases: 198, -// reads: 130523, -// writes: 5201, -// bytes_read: 1318479, -// bytes_written: 50401 -// } -// ); -// } - -// #[test] -// async fn key_pointer_cache_full() { -// assert_eq!( -// run_test(&mut KeyPointerCache::::new()).await, -// FlashStatsResult { -// erases: 198, -// reads: 14506, -// writes: 5201, -// bytes_read: 150566, -// bytes_written: 50401 -// } -// ); -// } - -// #[derive(Debug, PartialEq, Eq)] -// struct MockStorageItem { -// key: u8, -// value: Vec, -// } - -// #[derive(Debug, PartialEq, Eq)] -// enum MockStorageItemError { -// BufferTooSmall, -// InvalidKey, -// BufferTooBig, -// } - -// impl StorageItem for MockStorageItem { -// type Key = u8; - -// type Error = MockStorageItemError; - -// fn serialize_into(&self, buffer: &mut [u8]) -> Result { -// if buffer.len() < 2 + self.value.len() { -// return Err(MockStorageItemError::BufferTooSmall); -// } - -// if self.value.len() > 255 { -// return Err(MockStorageItemError::BufferTooBig); -// } - -// // The serialized value must not be all 0xFF -// if self.key == 0xFF { -// return Err(MockStorageItemError::InvalidKey); -// } - -// buffer[0] = self.key; -// buffer[1] = self.value.len() as u8; -// buffer[2..][..self.value.len()].copy_from_slice(&self.value); - -// Ok(2 + self.value.len()) -// } - -// fn deserialize_from(buffer: &[u8]) -> Result -// where -// Self: Sized, -// { -// if buffer.len() < 2 { -// return Err(MockStorageItemError::BufferTooSmall); -// } - -// if buffer[0] == 0xFF { -// return Err(MockStorageItemError::InvalidKey); -// } - -// let len = buffer[1]; - -// if buffer.len() < 2 + len as usize { -// return Err(MockStorageItemError::BufferTooSmall); -// } - -// Ok(Self { -// key: buffer[0], -// value: buffer[2..][..len as usize].to_vec(), -// }) -// } - -// fn key(&self) -> Self::Key { -// self.key -// } -// } - -// async fn run_test(cache: &mut impl KeyCacheImpl) -> FlashStatsResult { -// let mut flash = -// mock_flash::MockFlashBase::::new(WriteCountCheck::Twice, None, true); -// const FLASH_RANGE: Range = 0x00..0x400; -// let mut data_buffer = AlignedBuf([0; 128]); - -// const LENGHT_PER_KEY: [usize; 24] = [ -// 11, 13, 6, 13, 13, 10, 2, 3, 5, 36, 1, 65, 4, 6, 1, 15, 10, 7, 3, 15, 9, 3, 4, 5, -// ]; - -// let start_snapshot = flash.stats_snapshot(); - -// for _ in 0..100 { -// for i in 0..24 { -// let item = MockStorageItem { -// key: i as u8, -// value: vec![i as u8; LENGHT_PER_KEY[i]], -// }; - -// store_item::<_, _>(&mut flash, FLASH_RANGE, 
cache, &mut data_buffer, &item) -// .await -// .unwrap(); -// } - -// for i in 0..24 { -// let item = fetch_item::( -// &mut flash, -// FLASH_RANGE, -// cache, -// &mut data_buffer, -// i as u8, -// ) -// .await -// .unwrap() -// .unwrap(); - -// println!("Fetched {item:?}"); - -// assert_eq!(item.value, vec![i as u8; LENGHT_PER_KEY[i]]); -// } -// } - -// start_snapshot.compare_to(flash.stats_snapshot()) -// } -// } +#[cfg(test)] +mod queue_tests { + use core::ops::Range; + + use crate::{ + cache::{CacheImpl, NoCache, PagePointerCache, PageStateCache}, + mock_flash::{self, FlashStatsResult, WriteCountCheck}, + queue::{peek, pop, push}, + AlignedBuf, + }; + + use futures_test::test; + + const NUM_PAGES: usize = 4; + const LOOP_COUNT: usize = 2000; + + #[test] + async fn no_cache() { + assert_eq!( + run_test(&mut NoCache::new()).await, + FlashStatsResult { + erases: 146, + reads: 594934, + writes: 6299, + bytes_read: 2766058, + bytes_written: 53299 + } + ); + } + + #[test] + async fn page_state_cache() { + assert_eq!( + run_test(&mut PageStateCache::::new()).await, + FlashStatsResult { + erases: 146, + reads: 308740, + writes: 6299, + bytes_read: 2479864, + bytes_written: 53299 + } + ); + } + + #[test] + async fn page_pointer_cache() { + assert_eq!( + run_test(&mut PagePointerCache::::new()).await, + FlashStatsResult { + erases: 146, + reads: 211172, + writes: 6299, + bytes_read: 1699320, + bytes_written: 53299 + } + ); + } + + async fn run_test(cache: &mut impl CacheImpl) -> FlashStatsResult { + let mut flash = + mock_flash::MockFlashBase::::new(WriteCountCheck::Twice, None, true); + const FLASH_RANGE: Range = 0x00..0x400; + let mut data_buffer = AlignedBuf([0; 1024]); + + let start_snapshot = flash.stats_snapshot(); + + for i in 0..LOOP_COUNT { + println!("{i}"); + let data = vec![i as u8; i % 20 + 1]; + + println!("PUSH"); + push(&mut flash, FLASH_RANGE, cache, &data, true) + .await + .unwrap(); + assert_eq!( + peek(&mut flash, FLASH_RANGE, cache, &mut data_buffer) + .await + .unwrap() + .unwrap(), + &data, + "At {i}" + ); + println!("POP"); + assert_eq!( + pop(&mut flash, FLASH_RANGE, cache, &mut data_buffer) + .await + .unwrap() + .unwrap(), + &data, + "At {i}" + ); + println!("PEEK"); + assert_eq!( + peek(&mut flash, FLASH_RANGE, cache, &mut data_buffer) + .await + .unwrap(), + None, + "At {i}" + ); + println!("DONE"); + } + + start_snapshot.compare_to(flash.stats_snapshot()) + } +} + +#[cfg(test)] +mod map_tests { + use core::ops::Range; + + use crate::{ + cache::{KeyCacheImpl, KeyPointerCache, NoCache, PagePointerCache, PageStateCache}, + map::{fetch_item, store_item}, + mock_flash::{self, FlashStatsResult, WriteCountCheck}, + AlignedBuf, + }; + + use futures_test::test; + + const NUM_PAGES: usize = 4; + + #[test] + async fn no_cache() { + assert_eq!( + run_test(&mut NoCache::new()).await, + FlashStatsResult { + erases: 198, + reads: 224161, + writes: 5201, + bytes_read: 1770974, + bytes_written: 50401 + } + ); + } + + #[test] + async fn page_state_cache() { + assert_eq!( + run_test(&mut PageStateCache::::new()).await, + FlashStatsResult { + erases: 198, + reads: 171543, + writes: 5201, + bytes_read: 1718356, + bytes_written: 50401 + } + ); + } + + #[test] + async fn page_pointer_cache() { + assert_eq!( + run_test(&mut PagePointerCache::::new()).await, + FlashStatsResult { + erases: 198, + reads: 153667, + writes: 5201, + bytes_read: 1575348, + bytes_written: 50401 + } + ); + } + + #[test] + async fn key_pointer_cache_half() { + assert_eq!( + run_test(&mut 
KeyPointerCache::::new()).await, + FlashStatsResult { + erases: 198, + reads: 130523, + writes: 5201, + bytes_read: 1318479, + bytes_written: 50401 + } + ); + } + + #[test] + async fn key_pointer_cache_full() { + assert_eq!( + run_test(&mut KeyPointerCache::::new()).await, + FlashStatsResult { + erases: 198, + reads: 14506, + writes: 5201, + bytes_read: 150566, + bytes_written: 50401 + } + ); + } + + async fn run_test(cache: &mut impl KeyCacheImpl) -> FlashStatsResult { + let mut flash = + mock_flash::MockFlashBase::::new(WriteCountCheck::Twice, None, true); + const FLASH_RANGE: Range = 0x00..0x400; + let mut data_buffer = AlignedBuf([0; 128]); + + const LENGHT_PER_KEY: [usize; 24] = [ + 11, 13, 6, 13, 13, 10, 2, 3, 5, 36, 1, 65, 4, 6, 1, 15, 10, 7, 3, 15, 9, 3, 4, 5, + ]; + + let start_snapshot = flash.stats_snapshot(); + + for _ in 0..100 { + for i in 0..24 { + store_item(&mut flash, FLASH_RANGE, cache, &mut data_buffer, i as u8, &vec![i as u8; LENGHT_PER_KEY[i]].as_slice()) + .await + .unwrap(); + } + + for i in 0..24 { + let item = fetch_item::( + &mut flash, + FLASH_RANGE, + cache, + &mut data_buffer, + i as u8, + ) + .await + .unwrap() + .unwrap(); + + // println!("Fetched {item:?}"); + + assert_eq!(item, vec![i as u8; LENGHT_PER_KEY[i]]); + } + } + + start_snapshot.compare_to(flash.stats_snapshot()) + } +} From 342ca4ff8ff5da1ab7097e69be8e4394a61e9b15 Mon Sep 17 00:00:00 2001 From: Dion Dokter Date: Tue, 26 Mar 2024 15:52:33 +0100 Subject: [PATCH 05/10] Fix wrongful cache invalidation issue --- src/cache/mod.rs | 24 +++++++++++++++++++ src/cache/tests.rs | 57 ++++++++++++++++++++++++++++++---------------- src/item.rs | 32 ++++++++++++-------------- src/lib.rs | 12 +++++----- src/map.rs | 3 +++ src/queue.rs | 36 ++++++++++++++--------------- 6 files changed, 102 insertions(+), 62 deletions(-) diff --git a/src/cache/mod.rs b/src/cache/mod.rs index e37836a..3b94a97 100644 --- a/src/cache/mod.rs +++ b/src/cache/mod.rs @@ -215,6 +215,12 @@ impl NoCache { } } +impl Default for NoCache { + fn default() -> Self { + Self::new() + } +} + impl PrivateCacheImpl for NoCache { type PSC = UncachedPageStates; type PPC = UncachedPagePointers; @@ -279,6 +285,12 @@ impl PageStateCache { } } +impl Default for PageStateCache { + fn default() -> Self { + Self::new() + } +} + impl PrivateCacheImpl for PageStateCache { type PSC = CachedPageStates; type PPC = UncachedPagePointers; @@ -346,6 +358,12 @@ impl PagePointerCache { } } +impl Default for PagePointerCache { + fn default() -> Self { + Self::new() + } +} + impl PrivateCacheImpl for PagePointerCache { type PSC = CachedPageStates; type PPC = CachedPagePointers; @@ -418,6 +436,12 @@ impl KeyPointerCache Default for KeyPointerCache { + fn default() -> Self { + Self::new() + } +} + impl PrivateCacheImpl for KeyPointerCache { diff --git a/src/cache/tests.rs b/src/cache/tests.rs index 729114d..b95fc4c 100644 --- a/src/cache/tests.rs +++ b/src/cache/tests.rs @@ -125,9 +125,9 @@ mod map_tests { run_test(&mut NoCache::new()).await, FlashStatsResult { erases: 198, - reads: 224161, + reads: 233786, writes: 5201, - bytes_read: 1770974, + bytes_read: 1837101, bytes_written: 50401 } ); @@ -139,9 +139,9 @@ mod map_tests { run_test(&mut PageStateCache::::new()).await, FlashStatsResult { erases: 198, - reads: 171543, + reads: 181162, writes: 5201, - bytes_read: 1718356, + bytes_read: 1784477, bytes_written: 50401 } ); @@ -153,9 +153,9 @@ mod map_tests { run_test(&mut PagePointerCache::::new()).await, FlashStatsResult { erases: 198, - reads: 153667, + reads: 163273, 
writes: 5201, - bytes_read: 1575348, + bytes_read: 1641365, bytes_written: 50401 } ); @@ -164,12 +164,12 @@ mod map_tests { #[test] async fn key_pointer_cache_half() { assert_eq!( - run_test(&mut KeyPointerCache::::new()).await, + run_test(&mut KeyPointerCache::::new()).await, FlashStatsResult { erases: 198, - reads: 130523, + reads: 131503, writes: 5201, - bytes_read: 1318479, + bytes_read: 1299275, bytes_written: 50401 } ); @@ -178,18 +178,18 @@ mod map_tests { #[test] async fn key_pointer_cache_full() { assert_eq!( - run_test(&mut KeyPointerCache::::new()).await, + run_test(&mut KeyPointerCache::::new()).await, FlashStatsResult { erases: 198, - reads: 14506, + reads: 14510, writes: 5201, - bytes_read: 150566, + bytes_read: 150592, bytes_written: 50401 } ); } - async fn run_test(cache: &mut impl KeyCacheImpl) -> FlashStatsResult { + async fn run_test(cache: &mut impl KeyCacheImpl) -> FlashStatsResult { let mut flash = mock_flash::MockFlashBase::::new(WriteCountCheck::Twice, None, true); const FLASH_RANGE: Range = 0x00..0x400; @@ -202,19 +202,36 @@ mod map_tests { let start_snapshot = flash.stats_snapshot(); for _ in 0..100 { - for i in 0..24 { - store_item(&mut flash, FLASH_RANGE, cache, &mut data_buffer, i as u8, &vec![i as u8; LENGHT_PER_KEY[i]].as_slice()) - .await - .unwrap(); + const WRITE_ORDER: [usize; 24] = [ + 15, 0, 4, 22, 18, 11, 19, 8, 14, 23, 5, 1, 16, 10, 6, 12, 20, 17, 3, 9, 7, 13, 21, + 2, + ]; + + for i in WRITE_ORDER { + store_item( + &mut flash, + FLASH_RANGE, + cache, + &mut data_buffer, + i as u16, + &vec![i as u8; LENGHT_PER_KEY[i]].as_slice(), + ) + .await + .unwrap(); } - for i in 0..24 { - let item = fetch_item::( + const READ_ORDER: [usize; 24] = [ + 8, 22, 21, 11, 16, 23, 13, 15, 19, 7, 6, 2, 12, 1, 17, 4, 20, 14, 10, 5, 9, 3, 18, + 0, + ]; + + for i in READ_ORDER { + let item = fetch_item::( &mut flash, FLASH_RANGE, cache, &mut data_buffer, - i as u8, + i as u16, ) .await .unwrap() diff --git a/src/item.rs b/src/item.rs index 068f1f0..54a1342 100644 --- a/src/item.rs +++ b/src/item.rs @@ -21,8 +21,8 @@ //! and has some other modifications to make corruption less likely to happen. //! 
-use core::ops::Range; use core::num::NonZeroU32; +use core::ops::Range; use embedded_storage_async::nor_flash::{MultiwriteNorFlash, NorFlash}; @@ -54,7 +54,7 @@ impl ItemHeader { flash: &mut S, address: u32, end_address: u32, - ) -> Result, Error< S::Error>> { + ) -> Result, Error> { let mut buffer = [0; MAX_WORD_SIZE]; let header_slice = &mut buffer[..round_up_to_alignment_usize::(Self::LENGTH)]; @@ -104,7 +104,7 @@ impl ItemHeader { data_buffer: &'d mut [u8], address: u32, end_address: u32, - ) -> Result, Error< S::Error>> { + ) -> Result, Error> { match self.crc { None => Ok(MaybeItem::Erased(self, data_buffer)), Some(header_crc) => { @@ -141,11 +141,7 @@ impl ItemHeader { } } - async fn write( - &self, - flash: &mut S, - address: u32, - ) -> Result<(), Error< S::Error>> { + async fn write(&self, flash: &mut S, address: u32) -> Result<(), Error> { let mut buffer = AlignedBuf([0xFF; MAX_WORD_SIZE]); buffer[Self::DATA_CRC_FIELD] @@ -174,7 +170,7 @@ impl ItemHeader { flash_range: Range, cache: &mut impl PrivateCacheImpl, address: u32, - ) -> Result> { + ) -> Result> { self.crc = None; cache.notice_item_erased::(flash_range.clone(), address, &self); self.write(flash, address).await?; @@ -223,7 +219,7 @@ impl<'d> Item<'d> { cache: &mut impl PrivateCacheImpl, address: u32, data: &'d [u8], - ) -> Result> { + ) -> Result> { let header = ItemHeader { length: data.len() as u16, crc: Some(adapted_crc32(data)), @@ -241,7 +237,7 @@ impl<'d> Item<'d> { header: &ItemHeader, data: &[u8], address: u32, - ) -> Result<(), Error< S::Error>> { + ) -> Result<(), Error> { cache.notice_item_written::(flash_range, address, header); header.write(flash, address).await?; @@ -282,7 +278,7 @@ impl<'d> Item<'d> { flash_range: Range, cache: &mut impl PrivateCacheImpl, address: u32, - ) -> Result<(), Error< S::Error>> { + ) -> Result<(), Error> { Self::write_raw( flash, flash_range, @@ -340,7 +336,7 @@ pub async fn find_next_free_item_spot( start_address: u32, end_address: u32, data_length: u32, -) -> Result, Error< S::Error>> { +) -> Result, Error> { let page_index = calculate_page_index::(flash_range, start_address); let free_item_address = match cache.first_item_after_written(page_index) { @@ -392,7 +388,7 @@ impl<'d> core::fmt::Debug for MaybeItem<'d> { } impl<'d> MaybeItem<'d> { - pub fn unwrap(self) -> Result, Error< E>> { + pub fn unwrap(self) -> Result, Error> { match self { MaybeItem::Corrupted(_, _) => Err(Error::Corrupted { #[cfg(feature = "_test")] @@ -476,7 +472,7 @@ pub async fn is_page_empty( cache: &mut impl PrivateCacheImpl, page_index: usize, page_state: Option, -) -> Result> { +) -> Result> { let page_state = match page_state { Some(page_state) => page_state, None => get_page_state::(flash, flash_range.clone(), cache, page_index).await?, @@ -521,7 +517,7 @@ impl ItemIter { &mut self, flash: &mut S, data_buffer: &'m mut [u8], - ) -> Result, u32)>, Error< S::Error>> { + ) -> Result, u32)>, Error> { let mut data_buffer = Some(data_buffer); while let (Some(header), address) = self.header.next(flash).await? 
{ let buffer = data_buffer.take().unwrap(); @@ -558,7 +554,7 @@ impl ItemHeaderIter { pub async fn next( &mut self, flash: &mut S, - ) -> Result<(Option, u32), Error< S::Error>> { + ) -> Result<(Option, u32), Error> { self.traverse(flash, |_, _| false).await } @@ -570,7 +566,7 @@ impl ItemHeaderIter { &mut self, flash: &mut S, callback: impl Fn(&ItemHeader, u32) -> bool, - ) -> Result<(Option, u32), Error< S::Error>> { + ) -> Result<(Option, u32), Error> { loop { match ItemHeader::read_new(flash, self.current_address, self.end_address).await { Ok(Some(header)) => { diff --git a/src/lib.rs b/src/lib.rs index 0e6e357..82930e1 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -50,7 +50,7 @@ async fn try_general_repair( flash: &mut S, flash_range: Range, cache: &mut impl PrivateCacheImpl, -) -> Result<(), Error< S::Error>> { +) -> Result<(), Error> { // Loop through the pages and get their state. If one returns the corrupted error, // the page is likely half-erased. Fix for that is to re-erase again to hopefully finish the job. for page_index in get_pages::(flash_range.clone(), 0) { @@ -74,7 +74,7 @@ async fn find_first_page( cache: &mut impl PrivateCacheImpl, starting_page_index: usize, page_state: PageState, -) -> Result, Error< S::Error>> { +) -> Result, Error> { for page_index in get_pages::(flash_range.clone(), starting_page_index) { if page_state == get_page_state::(flash, flash_range.clone(), cache, page_index).await? { return Ok(Some(page_index)); @@ -138,7 +138,7 @@ async fn get_page_state( flash_range: Range, cache: &mut impl PrivateCacheImpl, page_index: usize, -) -> Result> { +) -> Result> { if let Some(cached_page_state) = cache.get_page_state(page_index) { return Ok(cached_page_state); } @@ -206,7 +206,7 @@ async fn open_page( flash_range: Range, cache: &mut impl PrivateCacheImpl, page_index: usize, -) -> Result<(), Error< S::Error>> { +) -> Result<(), Error> { cache.notice_page_state(page_index, PageState::Open, true); flash @@ -230,7 +230,7 @@ async fn close_page( flash_range: Range, cache: &mut impl PrivateCacheImpl, page_index: usize, -) -> Result<(), Error< S::Error>> { +) -> Result<(), Error> { let current_state = partial_close_page::(flash, flash_range.clone(), cache, page_index).await?; @@ -263,7 +263,7 @@ async fn partial_close_page( flash_range: Range, cache: &mut impl PrivateCacheImpl, page_index: usize, -) -> Result> { +) -> Result> { let current_state = get_page_state::(flash, flash_range.clone(), cache, page_index).await?; if current_state != PageState::Open { diff --git a/src/map.rs b/src/map.rs index 290d573..b3c26fc 100644 --- a/src/map.rs +++ b/src/map.rs @@ -736,6 +736,9 @@ async fn migrate_items( let key = K::deserialize_from(&item.data()[..K::LEN]); let (_, data_buffer) = item.destruct(); + // We're in a decent state here + cache.unmark_dirty(); + // Search for the newest item with the key we found let Some((found_item, found_address)) = fetch_item_with_location::( flash, diff --git a/src/queue.rs b/src/queue.rs index 516e131..c0ca295 100644 --- a/src/queue.rs +++ b/src/queue.rs @@ -76,7 +76,7 @@ pub async fn push( cache: &mut impl CacheImpl, data: &[u8], allow_overwrite_old_data: bool, -) -> Result<(), Error< S::Error>> { +) -> Result<(), Error> { run_with_auto_repair!( function = push_inner( flash, @@ -96,7 +96,7 @@ async fn push_inner( cache: &mut impl CacheImpl, data: &[u8], allow_overwrite_old_data: bool, -) -> Result<(), Error< S::Error>> { +) -> Result<(), Error> { assert_eq!(flash_range.start % S::ERASE_SIZE as u32, 0); assert_eq!(flash_range.end % 
S::ERASE_SIZE as u32, 0); @@ -199,7 +199,7 @@ pub async fn peek_many<'d, S: NorFlash, CI: CacheImpl>( flash: &'d mut S, flash_range: Range, cache: &'d mut CI, -) -> Result, Error< S::Error>> { +) -> Result, Error> { // Note: Corruption repair is done in these functions already Ok(PeekIterator { iter: QueueIterator::new(flash, flash_range, cache).await?, @@ -221,7 +221,7 @@ pub async fn peek<'d, S: NorFlash>( flash_range: Range, cache: &mut impl CacheImpl, data_buffer: &'d mut [u8], -) -> Result, Error< S::Error>> { +) -> Result, Error> { // Note: Corruption repair is done in these functions already peek_many(flash, flash_range, cache) .await? @@ -238,7 +238,7 @@ pub async fn pop_many<'d, S: MultiwriteNorFlash, CI: CacheImpl>( flash: &'d mut S, flash_range: Range, cache: &'d mut CI, -) -> Result, Error< S::Error>> { +) -> Result, Error> { // Note: Corruption repair is done in these functions already Ok(PopIterator { iter: QueueIterator::new(flash, flash_range, cache).await?, @@ -260,7 +260,7 @@ pub async fn pop<'d, S: MultiwriteNorFlash>( flash_range: Range, cache: &mut impl CacheImpl, data_buffer: &'d mut [u8], -) -> Result, Error< S::Error>> { +) -> Result, Error> { pop_many(flash, flash_range, cache) .await? .next(data_buffer) @@ -285,7 +285,7 @@ impl<'d, S: MultiwriteNorFlash, CI: CacheImpl> PopIterator<'d, S, CI> { pub async fn next<'m>( &mut self, data_buffer: &'m mut [u8], - ) -> Result, Error< S::Error>> { + ) -> Result, Error> { let value = run_with_auto_repair!( function = self.next_inner(data_buffer).await, repair = try_repair( @@ -302,7 +302,7 @@ impl<'d, S: MultiwriteNorFlash, CI: CacheImpl> PopIterator<'d, S, CI> { async fn next_inner( &mut self, data_buffer: &mut [u8], - ) -> Result, Error< S::Error>> { + ) -> Result, Error> { if self.iter.cache.is_dirty() { self.iter.cache.invalidate_cache_state(); } @@ -356,7 +356,7 @@ impl<'d, S: NorFlash, CI: CacheImpl> PeekIterator<'d, S, CI> { pub async fn next<'m>( &mut self, data_buffer: &'m mut [u8], - ) -> Result, Error< S::Error>> { + ) -> Result, Error> { // Note: Corruption repair is done in these functions already if self.iter.cache.is_dirty() { @@ -397,7 +397,7 @@ impl<'d, S: NorFlash, CI: CacheImpl> QueueIterator<'d, S, CI> { flash: &'d mut S, flash_range: Range, cache: &'d mut CI, - ) -> Result> { + ) -> Result> { let start_address = run_with_auto_repair!( function = Self::find_start_address(flash, flash_range.clone(), cache).await, repair = try_repair(flash, flash_range.clone(), cache).await? @@ -415,7 +415,7 @@ impl<'d, S: NorFlash, CI: CacheImpl> QueueIterator<'d, S, CI> { flash: &mut S, flash_range: Range, cache: &mut CI, - ) -> Result> { + ) -> Result> { assert_eq!(flash_range.start % S::ERASE_SIZE as u32, 0); assert_eq!(flash_range.end % S::ERASE_SIZE as u32, 0); @@ -442,7 +442,7 @@ impl<'d, S: NorFlash, CI: CacheImpl> QueueIterator<'d, S, CI> { async fn next<'m>( &mut self, data_buffer: &'m mut [u8], - ) -> Result, u32)>, Error< S::Error>> { + ) -> Result, u32)>, Error> { let value = run_with_auto_repair!( function = self.next_inner(data_buffer).await, repair = try_repair(self.flash, self.flash_range.clone(), self.cache).await? 
@@ -454,7 +454,7 @@ impl<'d, S: NorFlash, CI: CacheImpl> QueueIterator<'d, S, CI> { async fn next_inner( &mut self, data_buffer: &mut [u8], - ) -> Result, Error< S::Error>> { + ) -> Result, Error> { let mut data_buffer = Some(data_buffer); if self.cache.is_dirty() { @@ -570,7 +570,7 @@ pub async fn find_max_fit( flash: &mut S, flash_range: Range, cache: &mut impl CacheImpl, -) -> Result, Error< S::Error>> { +) -> Result, Error> { run_with_auto_repair!( function = find_max_fit_inner(flash, flash_range.clone(), cache).await, repair = try_repair(flash, flash_range.clone(), cache).await? @@ -581,7 +581,7 @@ async fn find_max_fit_inner( flash: &mut S, flash_range: Range, cache: &mut impl CacheImpl, -) -> Result, Error< S::Error>> { +) -> Result, Error> { assert_eq!(flash_range.start % S::ERASE_SIZE as u32, 0); assert_eq!(flash_range.end % S::ERASE_SIZE as u32, 0); @@ -647,7 +647,7 @@ async fn find_youngest_page( flash: &mut S, flash_range: Range, cache: &mut impl PrivateCacheImpl, -) -> Result> { +) -> Result> { let last_used_page = find_first_page(flash, flash_range.clone(), cache, 0, PageState::PartialOpen).await?; @@ -673,7 +673,7 @@ async fn find_oldest_page( flash: &mut S, flash_range: Range, cache: &mut impl PrivateCacheImpl, -) -> Result> { +) -> Result> { let youngest_page = find_youngest_page(flash, flash_range.clone(), cache).await?; // The oldest page is the first non-open page after the youngest page @@ -697,7 +697,7 @@ async fn try_repair( flash: &mut S, flash_range: Range, cache: &mut impl CacheImpl, -) -> Result<(), Error< S::Error>> { +) -> Result<(), Error> { cache.invalidate_cache_state(); crate::try_general_repair(flash, flash_range.clone(), cache).await?; From 2a55b8dda0aca41fc0cfa0d7c006a4c56cddf448 Mon Sep 17 00:00:00 2001 From: Dion Dokter Date: Tue, 26 Mar 2024 17:08:45 +0100 Subject: [PATCH 06/10] Fix outstanding issues --- README.md | 4 +- fuzz/fuzz_targets/map.rs | 81 +++++++++------------------------------- src/lib.rs | 7 +--- 3 files changed, 20 insertions(+), 72 deletions(-) diff --git a/README.md b/README.md index 0052160..ada3ee5 100644 --- a/README.md +++ b/README.md @@ -88,8 +88,8 @@ These numbers are taken from the test cases in the cache module: | ---------------: | -------------------------------------------: | ----------------: | -------------------: | ------------------: | ---------------------: | | NoCache | 0 | 100% | 100% | 100% | 100% | | PageStateCache | 1 * num pages | 77% | 97% | 51% | 90% | -| PagePointerCache | 9 * num pages | 69% | 89% | 35% | 61% | -| KeyPointerCache | 9 * num pages + (sizeof(KEY) + 4) * num keys | 6.5% | 8.5% | - | - | +| PagePointerCache | 9 * num pages | 70% | 89% | 35% | 61% | +| KeyPointerCache | 9 * num pages + (sizeof(KEY) + 4) * num keys | 6.2% | 8.2% | - | - | #### Takeaways diff --git a/fuzz/fuzz_targets/map.rs b/fuzz/fuzz_targets/map.rs index 43c2aff..c3eff7d 100644 --- a/fuzz/fuzz_targets/map.rs +++ b/fuzz/fuzz_targets/map.rs @@ -6,7 +6,6 @@ use libfuzzer_sys::fuzz_target; use rand::SeedableRng; use sequential_storage::{ cache::{KeyCacheImpl, KeyPointerCache, NoCache, PagePointerCache, PageStateCache}, - map::StorageItem, mock_flash::{MockFlashBase, MockFlashError, WriteCountCheck}, Error, }; @@ -45,57 +44,13 @@ struct StoreOp { } impl StoreOp { - fn into_test_item(self, rng: &mut impl rand::Rng) -> TestItem { - TestItem { - key: self.key, - value: (0..(self.value_len % 8) as usize) + fn into_test_item(self, rng: &mut impl rand::Rng) -> (u8, Vec) { + ( + self.key, + (0..(self.value_len % 8) as usize) .map(|_| 
rng.gen()) .collect(), - } - } -} - -#[derive(Debug, Clone, PartialEq, Eq)] -struct TestItem { - key: u8, - value: Vec, -} - -impl StorageItem for TestItem { - type Key = u8; - - type Error = (); - - fn serialize_into(&self, buffer: &mut [u8]) -> Result { - if buffer.len() < 1 + self.value.len() { - return Err(()); - } - - buffer[0] = self.key; - buffer[1..][..self.value.len()].copy_from_slice(&self.value); - - Ok(1 + self.value.len()) - } - - fn deserialize_from(buffer: &[u8]) -> Result - where - Self: Sized, - { - Ok(Self { - key: buffer[0], - value: buffer[1..].to_vec(), - }) - } - - fn key(&self) -> Self::Key { - self.key - } - - fn deserialize_key_only(buffer: &[u8]) -> Result - where - Self: Sized, - { - Ok(buffer[0]) + ) } } @@ -139,36 +94,35 @@ fn fuzz(ops: Input, mut cache: impl KeyCacheImpl + Debug) { match op.clone() { Op::Store(op) => { - let item = op.into_test_item(&mut rng); + let (key, value) = op.into_test_item(&mut rng); match block_on(sequential_storage::map::store_item( &mut flash, FLASH_RANGE, &mut cache, &mut buf.0, - &item, + key, + &value.as_slice(), )) { Ok(_) => { - map.insert(item.key, item.value); + map.insert(key, value); } Err(Error::FullStorage) => {} Err(Error::Storage { value: MockFlashError::EarlyShutoff(_), backtrace: _backtrace, }) => { - match block_on(sequential_storage::map::fetch_item::( + match block_on(sequential_storage::map::fetch_item::( &mut flash, FLASH_RANGE, &mut cache, &mut buf.0, - item.key, + key, )) { - Ok(Some(check_item)) - if check_item.key == item.key && check_item.value == item.value => - { + Ok(Some(check_item)) if check_item == value => { #[cfg(fuzzing_repro)] eprintln!("Early shutoff when storing {item:?}! (but it still stored fully). Originated from:\n{_backtrace:#}"); // Even though we got a shutoff, it still managed to store well - map.insert(item.key, item.value); + map.insert(key, value); } _ => { // Could not fetch the item we stored... @@ -188,7 +142,7 @@ fn fuzz(ops: Input, mut cache: impl KeyCacheImpl + Debug) { } } Op::Fetch(key) => { - match block_on(sequential_storage::map::fetch_item::( + match block_on(sequential_storage::map::fetch_item::( &mut flash, FLASH_RANGE, &mut cache, @@ -199,8 +153,7 @@ fn fuzz(ops: Input, mut cache: impl KeyCacheImpl + Debug) { let map_value = map .get(&key) .expect(&format!("Map doesn't contain: {fetch_result:?}")); - assert_eq!(key, fetch_result.key, "Mismatching keys"); - assert_eq!(map_value, &fetch_result.value, "Mismatching values"); + assert_eq!(map_value, &fetch_result, "Mismatching values"); } Ok(None) => { assert_eq!(None, map.get(&key)); @@ -223,7 +176,7 @@ fn fuzz(ops: Input, mut cache: impl KeyCacheImpl + Debug) { } } Op::Remove(key) => { - match block_on(sequential_storage::map::remove_item::( + match block_on(sequential_storage::map::remove_item::( &mut flash, FLASH_RANGE, &mut cache, @@ -237,7 +190,7 @@ fn fuzz(ops: Input, mut cache: impl KeyCacheImpl + Debug) { value: MockFlashError::EarlyShutoff(_), backtrace: _backtrace, }) => { - match block_on(sequential_storage::map::fetch_item::( + match block_on(sequential_storage::map::fetch_item::( &mut flash, FLASH_RANGE, &mut cache, diff --git a/src/lib.rs b/src/lib.rs index 82930e1..86387dc 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -396,12 +396,7 @@ where } #[cfg(feature = "std")] -impl std::error::Error for Error -where - I: std::error::Error, - S: std::error::Error, -{ -} +impl std::error::Error for Error where S: std::error::Error {} /// Round up the the given number to align with the wordsize of the flash. 
/// If the number is already aligned, it is not changed. From 8851a74dbb51ff75f2c30bb66b69cc77df11984c Mon Sep 17 00:00:00 2001 From: Dion Dokter Date: Tue, 26 Mar 2024 17:11:33 +0100 Subject: [PATCH 07/10] Prepare for doc fixing --- src/lib.rs | 2 +- src/map.rs | 58 ++++++++++++++++++++++++++++-------------------------- 2 files changed, 31 insertions(+), 29 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 86387dc..efa1ef1 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,5 +1,5 @@ #![cfg_attr(not(any(test, doctest, feature = "std")), no_std)] -// #![deny(missing_docs)] +#![deny(missing_docs)] #![doc = include_str!("../README.md")] // Assumptions made in this crate: diff --git a/src/map.rs b/src/map.rs index b3c26fc..00506e1 100644 --- a/src/map.rs +++ b/src/map.rs @@ -80,23 +80,24 @@ use self::{ use super::*; -/// Get a storage item from the flash. -/// Only the last stored item of the given key is returned. -/// -/// If no value with the key is found, None is returned. -/// -/// The data buffer must be long enough to hold the longest serialized data of your [StorageItem] type, -/// rounded up to flash word alignment. -/// -/// *Note: On a given flash range, make sure to use only the same type as [StorageItem] every time -/// or types that serialize and deserialize the key in the same way.* -pub async fn fetch_item<'d, K: Key, I: MapItem<'d>, S: NorFlash>( +// TODO revise +// /// Get a storage item from the flash. +// /// Only the last stored item of the given key is returned. +// /// +// /// If no value with the key is found, None is returned. +// /// +// /// The data buffer must be long enough to hold the longest serialized data of your [StorageItem] type, +// /// rounded up to flash word alignment. +// /// +// /// *Note: On a given flash range, make sure to use only the same type as [StorageItem] every time +// /// or types that serialize and deserialize the key in the same way.* +pub async fn fetch_item<'d, K: Key, V: Value<'d>, S: NorFlash>( flash: &mut S, flash_range: Range, cache: &mut impl KeyCacheImpl, data_buffer: &'d mut [u8], search_key: K, -) -> Result, Error> { +) -> Result, Error> { let result = run_with_auto_repair!( function = { fetch_item_with_location( @@ -120,7 +121,7 @@ pub async fn fetch_item<'d, K: Key, I: MapItem<'d>, S: NorFlash>( let data_len = item.data().len(); Ok(Some( - I::deserialize_from(&item.destruct().1[K::LEN..][..data_len - K::LEN]) + V::deserialize_from(&item.destruct().1[K::LEN..][..data_len - K::LEN]) .map_err(Error::Item)?, )) } @@ -282,21 +283,22 @@ async fn fetch_item_with_location<'d, K: Key, S: NorFlash>( } } -/// Store an item into flash memory. -/// It will overwrite the last value that has the same key. -/// The flash needs to be at least 2 pages long. -/// -/// The data buffer must be long enough to hold the longest serialized data of your [StorageItem] type. -/// -/// *Note: On a given flash range, make sure to use only the same type as [StorageItem] every time -/// or types that serialize and deserialize the key in the same way.* -pub async fn store_item<'d, K: Key, I: MapItem<'d>, S: NorFlash>( +// TODO revise +// /// Store an item into flash memory. +// /// It will overwrite the last value that has the same key. +// /// The flash needs to be at least 2 pages long. +// /// +// /// The data buffer must be long enough to hold the longest serialized data of your [StorageItem] type. 
+// /// +// /// *Note: On a given flash range, make sure to use only the same type as [StorageItem] every time +// /// or types that serialize and deserialize the key in the same way.* +pub async fn store_item<'d, K: Key, V: Value<'d>, S: NorFlash>( flash: &mut S, flash_range: Range, cache: &mut impl KeyCacheImpl, data_buffer: &mut [u8], key: K, - item: &I, + item: &V, ) -> Result<(), Error> { run_with_auto_repair!( function = store_item_inner( @@ -318,7 +320,7 @@ async fn store_item_inner<'d, K: Key, S: NorFlash>( cache: &mut impl KeyCacheImpl, data_buffer: &mut [u8], key: K, - item: &dyn MapItem<'d>, + item: &dyn Value<'d>, ) -> Result<(), Error> { assert_eq!(flash_range.start % S::ERASE_SIZE as u32, 0); assert_eq!(flash_range.end % S::ERASE_SIZE as u32, 0); @@ -622,14 +624,14 @@ impl Key for [u8; N] { } } -pub trait MapItem<'a> { +pub trait Value<'a> { fn serialize_into(&self, buffer: &mut [u8]) -> Result; fn deserialize_from(buffer: &'a [u8]) -> Result where Self: Sized; } -impl<'a> MapItem<'a> for &'a [u8] { +impl<'a> Value<'a> for &'a [u8] { fn serialize_into(&self, buffer: &mut [u8]) -> Result { if buffer.len() < self.len() { return Err(MapItemError::BufferTooSmall); @@ -647,7 +649,7 @@ impl<'a> MapItem<'a> for &'a [u8] { } } -impl<'a, const N: usize> MapItem<'a> for [u8; N] { +impl<'a, const N: usize> Value<'a> for [u8; N] { fn serialize_into(&self, buffer: &mut [u8]) -> Result { if buffer.len() < self.len() { return Err(MapItemError::BufferTooSmall); @@ -667,7 +669,7 @@ impl<'a, const N: usize> MapItem<'a> for [u8; N] { macro_rules! impl_map_item_num { ($int:ty) => { - impl<'a> MapItem<'a> for $int { + impl<'a> Value<'a> for $int { fn serialize_into(&self, buffer: &mut [u8]) -> Result { buffer[..core::mem::size_of::()].copy_from_slice(&self.to_le_bytes()); Ok(core::mem::size_of::()) From bda10c8a3d268d28ef6aa8baf0dc57b7ab3dc7d1 Mon Sep 17 00:00:00 2001 From: Dion Dokter Date: Tue, 26 Mar 2024 17:12:19 +0100 Subject: [PATCH 08/10] fmt --- src/cache/mod.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/cache/mod.rs b/src/cache/mod.rs index 3b94a97..a0b6438 100644 --- a/src/cache/mod.rs +++ b/src/cache/mod.rs @@ -436,7 +436,9 @@ impl KeyPointerCache Default for KeyPointerCache { +impl Default + for KeyPointerCache +{ fn default() -> Self { Self::new() } From 49aa18a0b4171da68a7b60a14316fdd867c8aa3e Mon Sep 17 00:00:00 2001 From: Dion Dokter Date: Wed, 27 Mar 2024 11:07:37 +0100 Subject: [PATCH 09/10] Fix all docs and add to changelog --- CHANGELOG.md | 7 +++ Cargo.toml | 2 +- README.md | 4 +- src/lib.rs | 8 +-- src/map.rs | 159 ++++++++++++++++++++++++++++++++++++++------------- 5 files changed, 132 insertions(+), 48 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0c63d51..28972cc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,13 @@ ## Unreleased +- *Breaking:* Made the cache API a bit more strict. Caches now always have to be passed as a mutable reference. + The API before would lead to a lot of extra unncesessary binary size. +- *Breaking:* Removed the `StorageItem` trait in favor of two separate `Key` and `Value` traits. This helps cut + binary size and is closer to what users of the map APIs were expecting. +- *Breaking:* The error type is no longer generic over the Item error. That error variant has been renamed `MapValueError` + and carries a predefined error subtype. + ## 1.0.0 01-03-24 - *Breaking:* Corruption repair is automatic now! The repair functions have been made private. 
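[Editor's note, not part of the patch: a minimal sketch of what calling code looks like after the breaking changes listed in the Unreleased section above — the cache passed as `&mut`, and the old `StorageItem` replaced by a plain key (`u8`) plus value (`u32`). The setup of `flash` and `flash_range` is assumed to match the crate's own map-module doc example; the exact turbofish parameters are illustrative.]

```rust
// Sketch only: assumes `flash` and `flash_range` are prepared as in the map module docs.
use sequential_storage::{
    cache::NoCache,
    map::{fetch_item, store_item},
};

let mut data_buffer = [0; 128];
let mut cache = NoCache::new();

// Key type u8, value type u32; the cache now has to be passed as a mutable reference.
store_item(
    &mut flash,
    flash_range.clone(),
    &mut cache,
    &mut data_buffer,
    42u8,
    &104729u32,
)
.await
.unwrap();

// Fetching needs the value type spelled out, since it can no longer be inferred from a StorageItem.
let value = fetch_item::<u8, u32, _>(
    &mut flash,
    flash_range.clone(),
    &mut cache,
    &mut data_buffer,
    42u8,
)
.await
.unwrap();

assert_eq!(value, Some(104729));
```
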
diff --git a/Cargo.toml b/Cargo.toml index 0a38daf..be63cdb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sequential-storage" -version = "1.0.0" +version = "2.0.0" edition = "2021" license = "MIT OR Apache-2.0" description = "A crate for storing data in flash with minimal erase cycles." diff --git a/README.md b/README.md index ada3ee5..a99251e 100644 --- a/README.md +++ b/README.md @@ -25,8 +25,8 @@ The in-flash representation is not (yet?) stable. This too follows semver. - A breaking change to the in-flash representation will lead to a major version bump - A feature addition will lead to a minor version bump - - This is always backward-compatible. So data created by e.g. `0.8.0` can be used by `0.8.1`. - - This may not be forward-compatible. So data created by e.g. `0.8.1` may not be usable by `0.8.0`. + - This is always backward-compatible. So data created by e.g. `1.0.0` can be used by `1.1.0`. + - This may not be forward-compatible. So data created by e.g. `1.0.1` may not be usable by `1.0.0`. - After 1.0, patch releases only fix bugs and don't change the in-flash representation For any update, consult the changelog to see what changed. Any externally noticable changes are recorded there. diff --git a/src/lib.rs b/src/lib.rs index efa1ef1..2093ec5 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -13,7 +13,7 @@ use core::{ ops::{Deref, DerefMut, Range}, }; use embedded_storage_async::nor_flash::NorFlash; -use map::MapItemError; +use map::MapValueError; pub mod cache; mod item; @@ -360,8 +360,8 @@ pub enum Error { BufferTooBig, /// A provided buffer was to small to be used (usize is size needed) BufferTooSmall(usize), - /// A storage item error - Item(MapItemError), + /// A map value error + MapValueError(MapValueError), } impl PartialEq for Error { @@ -390,7 +390,7 @@ where f, "A provided buffer was to small to be used. Needed was {needed}" ), - Error::Item(value) => write!(f, "Item error: {value}"), + Error::MapValueError(value) => write!(f, "Map value error: {value}"), } } } diff --git a/src/map.rs b/src/map.rs index 00506e1..ef75f47 100644 --- a/src/map.rs +++ b/src/map.rs @@ -25,11 +25,11 @@ //! let flash_range = 0x1000..0x3000; //! // We need to give the crate a buffer to work with. //! // It must be big enough to serialize the biggest value of your storage type in, -//! // rounded up to to word alignment of the flash. Some kinds of flash may require +//! // rounded up to to word alignment of the flash. Some kinds of internal flash may require //! // this buffer to be aligned in RAM as well. //! let mut data_buffer = [0; 128]; //! -//! // We can fetch an item from the flash. +//! // We can fetch an item from the flash. We're using `u8` as our key type and `u32` as our value type. //! // Nothing is stored in it yet, so it will return None. //! //! assert_eq!( @@ -43,7 +43,9 @@ //! None //! ); //! -//! // Now we store an item the flash with key 42 +//! // Now we store an item the flash with key 42. +//! // Again we make sure we pass the correct key and value types, u8 and u32. +//! // It is important to do this consistently. //! //! store_item( //! &mut flash, @@ -68,6 +70,32 @@ //! ); //! # }); //! ``` +//! +//! ## Key and value traits +//! +//! In the previous example we saw we used one key and one value type. +//! It is ***crucial*** we use the same key type every time on the same range of flash. +//! This is because the internal items are serialized as `[key|value]`. A different key type +//! will have a different length and will make all data nonsense. 
+//! +//! However, if we have special knowledge about what we store for each key, +//! we are allowed to use different value types. +//! +//! For example, we can do the following: +//! +//! 1. Store a u32 with key 0 +//! 2. Store a custom type 'Foo' with key 1 +//! 3. Fetch a u32 with key 0 +//! 4. Fetch a custom type 'Foo' with key 1 +//! +//! It is up to the user to make sure this is done correctly. +//! If done incorrectly, the deserialize function of requested value type will see +//! data it doesn't expect. In the best case it'll return an error, in a bad case it'll +//! give bad invalid data and in the worst case the deserialization code panics. +//! So be careful. +//! +//! For your convenience there are premade implementations for the [Key] and [Value] traits. +//! use embedded_storage_async::nor_flash::MultiwriteNorFlash; @@ -80,17 +108,20 @@ use self::{ use super::*; -// TODO revise -// /// Get a storage item from the flash. -// /// Only the last stored item of the given key is returned. -// /// -// /// If no value with the key is found, None is returned. -// /// -// /// The data buffer must be long enough to hold the longest serialized data of your [StorageItem] type, -// /// rounded up to flash word alignment. -// /// -// /// *Note: On a given flash range, make sure to use only the same type as [StorageItem] every time -// /// or types that serialize and deserialize the key in the same way.* +/// Get the last stored value from the flash that is associated with the given key. +/// If no value with the key is found, None is returned. +/// +/// The data buffer must be long enough to hold the longest serialized data of your [Key] + [Value] types combined, +/// rounded up to flash word alignment. +/// +///
+/// +/// *You are required to, on a given flash range, use the same [Key] type every time. You are allowed to use* +/// *multiple [Value] types. See the module-level docs for more information about this.* +/// +/// Also watch out for using integers. This function will take any integer and it's easy to pass the wrong type. +/// +///
pub async fn fetch_item<'d, K: Key, V: Value<'d>, S: NorFlash>( flash: &mut S, flash_range: Range, @@ -122,7 +153,7 @@ pub async fn fetch_item<'d, K: Key, V: Value<'d>, S: NorFlash>( Ok(Some( V::deserialize_from(&item.destruct().1[K::LEN..][..data_len - K::LEN]) - .map_err(Error::Item)?, + .map_err(Error::MapValueError)?, )) } @@ -283,15 +314,21 @@ async fn fetch_item_with_location<'d, K: Key, S: NorFlash>( } } -// TODO revise -// /// Store an item into flash memory. -// /// It will overwrite the last value that has the same key. -// /// The flash needs to be at least 2 pages long. -// /// -// /// The data buffer must be long enough to hold the longest serialized data of your [StorageItem] type. -// /// -// /// *Note: On a given flash range, make sure to use only the same type as [StorageItem] every time -// /// or types that serialize and deserialize the key in the same way.* +/// Store a key-value pair into flash memory. +/// It will overwrite the last value that has the same key. +/// The flash needs to be at least 2 pages long. +/// +/// The data buffer must be long enough to hold the longest serialized data of your [Key] + [Value] types combined, +/// rounded up to flash word alignment. +/// +///
+/// +/// *You are required to, on a given flash range, use the same [Key] type every time. You are allowed to use* +/// *multiple [Value] types. See the module-level docs for more information about this.* +/// +/// Also watch out for using integers. This function will take any integer and it's easy to pass the wrong type. +/// +///
pub async fn store_item<'d, K: Key, V: Value<'d>, S: NorFlash>( flash: &mut S, flash_range: Range, @@ -379,7 +416,7 @@ async fn store_item_inner<'d, K: Key, S: NorFlash>( let item_data_length = K::LEN + item .serialize_into(&mut data_buffer[K::LEN..]) - .map_err(Error::Item)?; + .map_err(Error::MapValueError)?; let free_spot_address = find_next_free_item_spot( flash, @@ -491,6 +528,15 @@ async fn store_item_inner<'d, K: Key, S: NorFlash>( /// All items in flash have to be read and deserialized to find the items with the key. /// This is unlikely to be cached well. /// +/// +///
+/// +/// *You are required to, on a given flash range, use the same [Key] type every time. You are allowed to use* +/// *multiple [Value] types. See the module-level docs for more information about this.* +/// +/// Also watch out for using integers. This function will take any integer and it's easy to pass the wrong type. +/// +///
pub async fn remove_item( flash: &mut S, flash_range: Range, @@ -578,10 +624,24 @@ async fn remove_item_inner( Ok(()) } +/// Anything implementing this trait can be used as a key in the map functions. +/// +/// It provides a way to serialize and deserialize the key as well as provide a +/// constant for the serialized length. +/// +/// The functions don't return a result. Keys should be simple and trivial. +/// +/// The `Eq` bound is used because we need to be able to compare keys and the +/// `Clone` bound helps us pass the key around. pub trait Key: Eq + Clone + Sized { + /// The serialized length of the key const LEN: usize; + /// Serialize the key into the given buffer. + /// The buffer is always of the same length as the [Self::LEN] constant. fn serialize_into(&self, buffer: &mut [u8]); + /// Deserialize the key from the given buffer. + /// The buffer is always of the same length as the [Self::LEN] constant. fn deserialize_from(buffer: &[u8]) -> Self; } @@ -624,24 +684,32 @@ impl Key for [u8; N] { } } +/// The trait that defines how map values are serialized and deserialized. +/// +/// It also carries a lifetime so that zero-copy deserialization is supported. +/// Zero-copy serialization is not supported due to technical restrictions. pub trait Value<'a> { - fn serialize_into(&self, buffer: &mut [u8]) -> Result; - fn deserialize_from(buffer: &'a [u8]) -> Result + /// Serialize the value into the given buffer. If everything went ok, this function returns the length + /// of the used part of the buffer. + fn serialize_into(&self, buffer: &mut [u8]) -> Result; + /// Deserialize the value from the buffer. Because of the added lifetime, the implementation can borrow from the + /// buffer which opens up some zero-copy possibilities. + fn deserialize_from(buffer: &'a [u8]) -> Result where Self: Sized; } impl<'a> Value<'a> for &'a [u8] { - fn serialize_into(&self, buffer: &mut [u8]) -> Result { + fn serialize_into(&self, buffer: &mut [u8]) -> Result { if buffer.len() < self.len() { - return Err(MapItemError::BufferTooSmall); + return Err(MapValueError::BufferTooSmall); } buffer[..self.len()].copy_from_slice(self); Ok(self.len()) } - fn deserialize_from(buffer: &'a [u8]) -> Result + fn deserialize_from(buffer: &'a [u8]) -> Result where Self: Sized, { @@ -650,36 +718,36 @@ impl<'a> Value<'a> for &'a [u8] { } impl<'a, const N: usize> Value<'a> for [u8; N] { - fn serialize_into(&self, buffer: &mut [u8]) -> Result { + fn serialize_into(&self, buffer: &mut [u8]) -> Result { if buffer.len() < self.len() { - return Err(MapItemError::BufferTooSmall); + return Err(MapValueError::BufferTooSmall); } buffer[..self.len()].copy_from_slice(self); Ok(self.len()) } - fn deserialize_from(buffer: &'a [u8]) -> Result + fn deserialize_from(buffer: &'a [u8]) -> Result where Self: Sized, { - buffer.try_into().map_err(|_| MapItemError::BufferTooSmall) + buffer.try_into().map_err(|_| MapValueError::BufferTooSmall) } } macro_rules! 
impl_map_item_num { ($int:ty) => { impl<'a> Value<'a> for $int { - fn serialize_into(&self, buffer: &mut [u8]) -> Result { + fn serialize_into(&self, buffer: &mut [u8]) -> Result { buffer[..core::mem::size_of::()].copy_from_slice(&self.to_le_bytes()); Ok(core::mem::size_of::()) } - fn deserialize_from(buffer: &[u8]) -> Result { + fn deserialize_from(buffer: &[u8]) -> Result { Ok(Self::from_le_bytes( buffer[..core::mem::size_of::()] .try_into() - .map_err(|_| MapItemError::BufferTooSmall)?, + .map_err(|_| MapValueError::BufferTooSmall)?, )) } } @@ -699,19 +767,28 @@ impl_map_item_num!(i128); impl_map_item_num!(f32); impl_map_item_num!(f64); +/// Error for map value (de)serialization. +/// +/// This error type is predefined (in contrast to using generics) to save many kilobytes of binary size. #[non_exhaustive] #[derive(Debug, PartialEq, Eq, Clone)] #[cfg_attr(feature = "defmt-03", derive(defmt::Format))] -pub enum MapItemError { +pub enum MapValueError { + /// The provided buffer was too small. BufferTooSmall, + /// The deserialization could not succeed because the bytes are in an invalid format. InvalidFormat, + /// An implementation defined error that might contain more information than the other predefined + /// error variants. + Custom(i32), } -impl core::fmt::Display for MapItemError { +impl core::fmt::Display for MapValueError { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match self { - MapItemError::BufferTooSmall => write!(f, "Buffer too small"), - MapItemError::InvalidFormat => write!(f, "Invalid format"), + MapValueError::BufferTooSmall => write!(f, "Buffer too small"), + MapValueError::InvalidFormat => write!(f, "Invalid format"), + MapValueError::Custom(val) => write!(f, "Custom error: {val}"), } } } From c364cb535104c012715b43fa3f624b8e13303d29 Mon Sep 17 00:00:00 2001 From: Dion Dokter Date: Wed, 27 Mar 2024 11:39:46 +0100 Subject: [PATCH 10/10] Added some clarifying docs and an extra MapValueError variant that could come in handy --- src/map.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/map.rs b/src/map.rs index ef75f47..43d6c69 100644 --- a/src/map.rs +++ b/src/map.rs @@ -694,6 +694,9 @@ pub trait Value<'a> { fn serialize_into(&self, buffer: &mut [u8]) -> Result; /// Deserialize the value from the buffer. Because of the added lifetime, the implementation can borrow from the /// buffer which opens up some zero-copy possibilities. + /// + /// The buffer will be the same length as the serialize function returned for this value. Though note that the length + /// is written from flash, so bitflips can affect that (though the length is separately crc-protected). fn deserialize_from(buffer: &'a [u8]) -> Result where Self: Sized; @@ -776,6 +779,8 @@ impl_map_item_num!(f64); pub enum MapValueError { /// The provided buffer was too small. BufferTooSmall, + /// The serialization could not succeed because the data was not in order. (e.g. too big to fit) + InvalidData, /// The deserialization could not succeed because the bytes are in an invalid format. 
InvalidFormat, /// An implementation defined error that might contain more information than the other predefined @@ -787,6 +792,7 @@ impl core::fmt::Display for MapValueError { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match self { MapValueError::BufferTooSmall => write!(f, "Buffer too small"), + MapValueError::InvalidData => write!(f, "Invalid data"), MapValueError::InvalidFormat => write!(f, "Invalid format"), MapValueError::Custom(val) => write!(f, "Custom error: {val}"), }
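
[Editor's note, not part of the patch series: a minimal sketch of a user-defined type implementing the new `Value` trait with the predefined `MapValueError` introduced above. The `Settings` struct and its field layout are made up for illustration; only the trait shape and error variants come from the patches, and the return types are inferred from the built-in `&[u8]`/integer implementations.]

```rust
use sequential_storage::map::{MapValueError, Value};

/// A made-up settings blob, used only for this illustration.
struct Settings {
    brightness: u8,
    volume: u8,
}

impl<'a> Value<'a> for Settings {
    fn serialize_into(&self, buffer: &mut [u8]) -> Result<usize, MapValueError> {
        if buffer.len() < 2 {
            // Same behaviour as the built-in implementations when the buffer is too short.
            return Err(MapValueError::BufferTooSmall);
        }
        buffer[0] = self.brightness;
        buffer[1] = self.volume;
        Ok(2)
    }

    fn deserialize_from(buffer: &'a [u8]) -> Result<Self, MapValueError> {
        if buffer.len() < 2 {
            // The stored length is CRC-protected, but a defensive check keeps this total.
            return Err(MapValueError::InvalidFormat);
        }
        Ok(Self {
            brightness: buffer[0],
            volume: buffer[1],
        })
    }
}
```

With an implementation like this in place, `store_item` and `fetch_item` accept `Settings` as the value type in the same way as the integer and byte-slice implementations the patch provides, without any user-defined error type leaking into the crate's `Error` enum.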