diff --git a/README.md b/README.md index e3e9de8..d513fe5 100644 --- a/README.md +++ b/README.md @@ -31,6 +31,21 @@ If you're looking for an alternative with different tradeoffs, take a look at [e If for some reason an operation returns the corrupted error, then it might be repairable in many cases. See the repair functions in the map and queue modules for more info. +## Caching + +There are various cache options that speed up the operations. +By default (no cache) all state is stored in flash and the state has to be fully read every time. +Instead, we can optionally store some state in RAM. + +These numbers are taken from the test cases in the cache module: + +| Name | Map # flash reads | Queue # flash reads | +| -------------: | ----------------: | ------------------: | +| NoCache | 100% | 100% | +| PageStateCache | 77% | 51% | + +***Note:** These are the number of reads, not the number of bytes.* + ## Inner workings To save on erase cycles, this crate only really appends data to the pages. Exactly how this is done depends @@ -91,7 +106,11 @@ When using peek_many, you can look at all data from oldest to newest. (DD-MM-YY) ### Unreleased + - *Breaking* The item to store is now passed by reference to Map `store_item` +- *Breaking* Added cache options to the functions to speed up reading the state of the flash. + To retain the old behaviour you can pass the `NoCache` type as the cache parameter. +- Removed defmt logging since that wasn't being maintained. The format impl for the errors remains. 
### 0.7.0 10-01-24 diff --git a/fuzz/fuzz_targets/map.rs b/fuzz/fuzz_targets/map.rs index 646d4e4..1d5d957 100644 --- a/fuzz/fuzz_targets/map.rs +++ b/fuzz/fuzz_targets/map.rs @@ -94,9 +94,12 @@ fn fuzz(ops: Input) { let mut flash = MockFlashBase::::new( WriteCountCheck::OnceOnly, Some(ops.fuel as u32), + true, ); const FLASH_RANGE: Range = 0x000..0x1000; + let mut cache = sequential_storage::cache::NoCache::new(); + let mut map = HashMap::new(); #[repr(align(4))] struct AlignedBuf([u8; 260]); @@ -123,6 +126,7 @@ fn fuzz(ops: Input) { match block_on(sequential_storage::map::store_item( &mut flash, FLASH_RANGE, + &mut cache, &mut buf.0, &item, )) { @@ -137,6 +141,7 @@ fn fuzz(ops: Input) { match block_on(sequential_storage::map::fetch_item::( &mut flash, FLASH_RANGE, + &mut cache, &mut buf.0, item.key, )) { @@ -167,6 +172,7 @@ fn fuzz(ops: Input) { block_on(sequential_storage::map::try_repair::( &mut flash, FLASH_RANGE, + &mut cache, &mut buf.0, )) .unwrap(); @@ -180,6 +186,7 @@ fn fuzz(ops: Input) { match block_on(sequential_storage::map::fetch_item::( &mut flash, FLASH_RANGE, + &mut cache, &mut buf.0, key, )) { @@ -213,6 +220,7 @@ fn fuzz(ops: Input) { block_on(sequential_storage::map::try_repair::( &mut flash, FLASH_RANGE, + &mut cache, &mut buf.0, )) .unwrap(); diff --git a/fuzz/fuzz_targets/queue.rs b/fuzz/fuzz_targets/queue.rs index 5905c8c..fe02bba 100644 --- a/fuzz/fuzz_targets/queue.rs +++ b/fuzz/fuzz_targets/queue.rs @@ -45,9 +45,12 @@ fn fuzz(ops: Input) { let mut flash = MockFlashBase::::new( WriteCountCheck::Twice, Some(ops.fuel as u32), + true, ); const FLASH_RANGE: Range = 0x000..0x1000; + let mut cache = sequential_storage::cache::NoCache::new(); + let mut order = VecDeque::new(); let mut buf = AlignedBuf([0; MAX_VALUE_SIZE + 1]); @@ -73,6 +76,7 @@ fn fuzz(ops: Input) { let max_fit = match block_on(sequential_storage::queue::find_max_fit( &mut flash, FLASH_RANGE, + &mut cache, )) { Ok(val) => val, Err(Error::Corrupted { @@ -86,6 +90,7 @@ fn fuzz(ops: 
Input) { block_on(sequential_storage::queue::try_repair( &mut flash, FLASH_RANGE, + &mut cache, )) .unwrap(); corruption_repaired = true; @@ -99,6 +104,7 @@ fn fuzz(ops: Input) { match block_on(sequential_storage::queue::push( &mut flash, FLASH_RANGE, + &mut cache, &buf.0[..val.len()], false, )) { @@ -145,6 +151,7 @@ fn fuzz(ops: Input) { block_on(sequential_storage::queue::try_repair( &mut flash, FLASH_RANGE, + &mut cache, )) .unwrap(); corruption_repaired = true; @@ -157,6 +164,7 @@ fn fuzz(ops: Input) { match block_on(sequential_storage::queue::pop( &mut flash, FLASH_RANGE, + &mut cache, &mut buf.0, )) { Ok(value) => { @@ -193,6 +201,7 @@ fn fuzz(ops: Input) { block_on(sequential_storage::queue::try_repair( &mut flash, FLASH_RANGE, + &mut cache, )) .unwrap(); corruption_repaired = true; @@ -205,6 +214,7 @@ fn fuzz(ops: Input) { let mut popper = match block_on(sequential_storage::queue::pop_many( &mut flash, FLASH_RANGE, + &mut cache, )) { Ok(val) => val, Err(Error::Corrupted { @@ -218,6 +228,7 @@ fn fuzz(ops: Input) { block_on(sequential_storage::queue::try_repair( &mut flash, FLASH_RANGE, + &mut cache, )) .unwrap(); corruption_repaired = true; @@ -267,6 +278,7 @@ fn fuzz(ops: Input) { block_on(sequential_storage::queue::try_repair( &mut flash, FLASH_RANGE, + &mut cache, )) .unwrap(); corruption_repaired = true; @@ -282,6 +294,7 @@ fn fuzz(ops: Input) { match block_on(sequential_storage::queue::peek( &mut flash, FLASH_RANGE, + &mut cache, &mut buf.0, )) { Ok(value) => { @@ -301,6 +314,7 @@ fn fuzz(ops: Input) { block_on(sequential_storage::queue::try_repair( &mut flash, FLASH_RANGE, + &mut cache, )) .unwrap(); corruption_repaired = true; @@ -313,6 +327,7 @@ fn fuzz(ops: Input) { let mut peeker = match block_on(sequential_storage::queue::peek_many( &mut flash, FLASH_RANGE, + &mut cache, )) { Ok(val) => val, Err(Error::Corrupted { @@ -326,6 +341,7 @@ fn fuzz(ops: Input) { block_on(sequential_storage::queue::try_repair( &mut flash, FLASH_RANGE, + &mut cache, )) 
.unwrap(); corruption_repaired = true; @@ -357,6 +373,7 @@ fn fuzz(ops: Input) { block_on(sequential_storage::queue::try_repair( &mut flash, FLASH_RANGE, + &mut cache, )) .unwrap(); corruption_repaired = true; diff --git a/src/cache.rs b/src/cache.rs new file mode 100644 index 0000000..ee31aae --- /dev/null +++ b/src/cache.rs @@ -0,0 +1,390 @@ +//! Module implementing all things cache related + +use crate::PageState; + +/// Trait implemented by all cache types +#[allow(private_bounds)] +pub trait CacheImpl: PrivateCacheImpl {} + +impl CacheImpl for &mut T {} + +impl CacheImpl for Cache {} + +pub(crate) trait PrivateCacheImpl { + type PSC: PageStatesCache; + + fn inner(&mut self) -> &mut Cache; +} + +impl PrivateCacheImpl for &mut T { + type PSC = T::PSC; + + fn inner(&mut self) -> &mut Cache { + T::inner(self) + } +} + +impl PrivateCacheImpl for Cache { + type PSC = PSC; + + fn inner(&mut self) -> &mut Cache { + self + } +} + +/// A cache object implementing no cache. +/// +/// This type of cache doesn't have to be kept around and may be constructed on every api call. +/// You could simply pass `&mut NoCache::new()` every time. +pub struct NoCache(Cache); + +impl NoCache { + /// Construct a new instance + pub const fn new() -> Self { + Self(Cache::new(UncachedPageSates)) + } +} + +impl PrivateCacheImpl for NoCache { + type PSC = UncachedPageSates; + + fn inner(&mut self) -> &mut Cache { + &mut self.0 + } +} + +impl CacheImpl for NoCache {} + +/// A cache object that keeps track of the page states. +/// +/// This cache has to be kept around and passed to *every* api call to the same memory region until the cache gets discarded. +/// +/// Valid usecase: +/// `Create cache 1` -> `use 1` -> `use 1` -> `create cache 2` -> `use 2` -> `use 2` +/// +/// Invalid usecase: +/// `Create cache 1` -> `use 1` -> `create cache 2` -> `use 2` -> `❌ use 1 ❌` +/// +/// Make sure the page count is correct. 
If the number is lower than the actual amount, the code will panic at some point. +pub struct PageStateCache(Cache>); + +impl PageStateCache { + /// Construct a new instance + pub const fn new() -> Self { + Self(Cache::new(CachedPageStates::new())) + } +} + +impl PrivateCacheImpl for PageStateCache { + type PSC = CachedPageStates; + + fn inner(&mut self) -> &mut Cache { + &mut self.0 + } +} + +impl CacheImpl for PageStateCache {} + +#[derive(Debug)] +pub(crate) struct Cache { + dirty: bool, + page_states: PSC, +} + +impl Cache { + pub(crate) const fn new(page_states: PSC) -> Self { + Self { + dirty: false, + page_states, + } + } + + pub(crate) fn is_dirty(&self) -> bool { + self.dirty + } + + pub(crate) fn mark_dirty(&mut self) { + self.dirty = true; + } + + pub(crate) fn unmark_dirty(&mut self) { + self.dirty = false; + } + + pub(crate) fn invalidate_cache_state(&mut self) { + self.dirty = false; + self.page_states.invalidate_cache_state(); + } + + pub(crate) fn get_page_state(&self, page_index: usize) -> Option { + self.page_states.get_page_state(page_index) + } + + pub(crate) fn notice_page_state(&mut self, page_index: usize, new_state: PageState) { + self.mark_dirty(); + self.page_states.notice_page_state(page_index, new_state) + } +} + +pub(crate) trait PageStatesCache { + fn get_page_state(&self, page_index: usize) -> Option; + fn notice_page_state(&mut self, page_index: usize, new_state: PageState); + fn invalidate_cache_state(&mut self); +} + +#[derive(Debug)] +pub(crate) struct CachedPageStates { + pages: [Option; PAGE_COUNT], +} + +impl CachedPageStates { + pub const fn new() -> Self { + Self { + pages: [None; PAGE_COUNT], + } + } +} + +impl PageStatesCache for CachedPageStates { + fn get_page_state(&self, page_index: usize) -> Option { + self.pages[page_index] + } + + fn notice_page_state(&mut self, page_index: usize, new_state: PageState) { + self.pages[page_index] = Some(new_state); + } + + fn invalidate_cache_state(&mut self) { + *self = Self::new(); 
+ } +} + +#[derive(Debug, Default)] +pub(crate) struct UncachedPageSates; + +impl PageStatesCache for UncachedPageSates { + fn get_page_state(&self, _page_index: usize) -> Option { + None + } + + fn notice_page_state(&mut self, _page_index: usize, _new_state: PageState) {} + + fn invalidate_cache_state(&mut self) {} +} + +#[cfg(test)] +mod queue_tests { + use core::ops::Range; + + use crate::{ + mock_flash::{self, WriteCountCheck}, + queue::{peek, pop, push}, + AlignedBuf, + }; + + use super::*; + use futures_test::test; + + const NUM_PAGES: usize = 4; + const LOOP_COUNT: usize = 2000; + + #[test] + async fn no_cache() { + assert_eq!(run_test(&mut NoCache::new()).await, (594934, 6299, 146)); + } + + #[test] + async fn page_state_cache() { + assert_eq!( + run_test(&mut PageStateCache::::new()).await, + (308740, 6299, 146) + ); + } + + async fn run_test(mut cache: impl CacheImpl) -> (u32, u32, u32) { + let mut flash = + mock_flash::MockFlashBase::::new(WriteCountCheck::Twice, None, true); + const FLASH_RANGE: Range = 0x00..0x400; + let mut data_buffer = AlignedBuf([0; 1024]); + + for i in 0..LOOP_COUNT { + println!("{i}"); + let data = vec![i as u8; i % 20 + 1]; + + println!("PUSH"); + push(&mut flash, FLASH_RANGE, &mut cache, &data, true) + .await + .unwrap(); + assert_eq!( + &peek(&mut flash, FLASH_RANGE, &mut cache, &mut data_buffer) + .await + .unwrap() + .unwrap()[..], + &data, + "At {i}" + ); + println!("POP"); + assert_eq!( + &pop(&mut flash, FLASH_RANGE, &mut cache, &mut data_buffer) + .await + .unwrap() + .unwrap()[..], + &data, + "At {i}" + ); + println!("PEEK"); + assert_eq!( + peek(&mut flash, FLASH_RANGE, &mut cache, &mut data_buffer) + .await + .unwrap(), + None, + "At {i}" + ); + println!("DONE"); + } + + (flash.reads, flash.writes, flash.erases) + } +} + +#[cfg(test)] +mod map_tests { + use core::ops::Range; + + use crate::{ + map::{fetch_item, store_item, StorageItem}, + mock_flash::{self, WriteCountCheck}, + AlignedBuf, + }; + + use super::*; + use 
futures_test::test; + + const NUM_PAGES: usize = 4; + + #[test] + async fn no_cache() { + assert_eq!(run_test(&mut NoCache::new()).await, (224161, 5201, 198)); + } + + #[test] + async fn page_state_cache() { + assert_eq!( + run_test(&mut PageStateCache::::new()).await, + (172831, 5201, 198) + ); + } + + #[derive(Debug, PartialEq, Eq)] + struct MockStorageItem { + key: u8, + value: Vec, + } + + #[derive(Debug, PartialEq, Eq)] + enum MockStorageItemError { + BufferTooSmall, + InvalidKey, + BufferTooBig, + } + + impl StorageItem for MockStorageItem { + type Key = u8; + + type Error = MockStorageItemError; + + fn serialize_into(&self, buffer: &mut [u8]) -> Result { + if buffer.len() < 2 + self.value.len() { + return Err(MockStorageItemError::BufferTooSmall); + } + + if self.value.len() > 255 { + return Err(MockStorageItemError::BufferTooBig); + } + + // The serialized value must not be all 0xFF + if self.key == 0xFF { + return Err(MockStorageItemError::InvalidKey); + } + + buffer[0] = self.key; + buffer[1] = self.value.len() as u8; + buffer[2..][..self.value.len()].copy_from_slice(&self.value); + + Ok(2 + self.value.len()) + } + + fn deserialize_from(buffer: &[u8]) -> Result + where + Self: Sized, + { + if buffer.len() < 2 { + return Err(MockStorageItemError::BufferTooSmall); + } + + if buffer[0] == 0xFF { + return Err(MockStorageItemError::InvalidKey); + } + + let len = buffer[1]; + + if buffer.len() < 2 + len as usize { + return Err(MockStorageItemError::BufferTooSmall); + } + + Ok(Self { + key: buffer[0], + value: buffer[2..][..len as usize].to_vec(), + }) + } + + fn key(&self) -> Self::Key { + self.key + } + } + + async fn run_test(mut cache: impl CacheImpl) -> (u32, u32, u32) { + let mut cache = cache.inner(); + + let mut flash = + mock_flash::MockFlashBase::::new(WriteCountCheck::Twice, None, true); + const FLASH_RANGE: Range = 0x00..0x400; + let mut data_buffer = AlignedBuf([0; 128]); + + const LENGHT_PER_KEY: [usize; 24] = [ + 11, 13, 6, 13, 13, 10, 2, 3, 5, 
36, 1, 65, 4, 6, 1, 15, 10, 7, 3, 15, 9, 3, 4, 5, + ]; + + for _ in 0..100 { + for i in 0..24 { + let item = MockStorageItem { + key: i as u8, + value: vec![i as u8; LENGHT_PER_KEY[i]], + }; + + store_item::<_, _>(&mut flash, FLASH_RANGE, &mut cache, &mut data_buffer, &item) + .await + .unwrap(); + } + + for i in 0..24 { + let item = fetch_item::( + &mut flash, + FLASH_RANGE, + &mut cache, + &mut data_buffer, + i as u8, + ) + .await + .unwrap() + .unwrap(); + + println!("Fetched {item:?}"); + + assert_eq!(item.value, vec![i as u8; LENGHT_PER_KEY[i]]); + } + } + + (flash.reads, flash.writes, flash.erases) + } +} diff --git a/src/item.rs b/src/item.rs index 6f7baad..6c136af 100644 --- a/src/item.rs +++ b/src/item.rs @@ -27,6 +27,7 @@ use core::ops::Range; use embedded_storage_async::nor_flash::{MultiwriteNorFlash, NorFlash}; use crate::{ + cache::{Cache, PageStatesCache}, calculate_page_address, calculate_page_end_address, get_page_state, round_down_to_alignment, round_down_to_alignment_usize, round_up_to_alignment, round_up_to_alignment_usize, AlignedBuf, Error, NorFlashExt, PageState, MAX_WORD_SIZE, @@ -404,12 +405,13 @@ fn crc32_with_initial(data: &[u8], initial: u32) -> u32 { pub async fn is_page_empty( flash: &mut S, flash_range: Range, + cache: &mut Cache, page_index: usize, page_state: Option, ) -> Result> { let page_state = match page_state { Some(page_state) => page_state, - None => get_page_state::(flash, flash_range.clone(), page_index).await?, + None => get_page_state::(flash, flash_range.clone(), cache, page_index).await?, }; match page_state { @@ -456,15 +458,7 @@ impl ItemIter { .read_item(flash, buffer, address, self.header.end_address) .await? { - MaybeItem::Corrupted(_, buffer) => { - #[cfg(feature = "defmt")] - defmt::error!( - "Found a corrupted item at {:X}. 
Skipping...", - self.header.current_address - ); - data_buffer.replace(buffer); - } - MaybeItem::Erased(_, buffer) => { + MaybeItem::Corrupted(_, buffer) | MaybeItem::Erased(_, buffer) => { data_buffer.replace(buffer); } MaybeItem::Present(item) => { diff --git a/src/lib.rs b/src/lib.rs index fcc6670..158c3a3 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -7,12 +7,16 @@ // - flash erase size is quite big, aka, this is a paged flash // - flash write size is quite small, so it writes words and not full pages +use cache::{Cache, PageStatesCache}; use core::{ fmt::Debug, ops::{Deref, DerefMut, Range}, }; use embedded_storage_async::nor_flash::NorFlash; +use crate::cache::NoCache; + +pub mod cache; mod item; pub mod map; pub mod queue; @@ -47,24 +51,28 @@ async fn try_general_repair( flash: &mut S, flash_range: Range, ) -> Result<(), Error> { + use crate::cache::PrivateCacheImpl; + // Loop through the pages and get their state. If one returns the corrupted error, // the page is likely half-erased. Fix for that is to re-erase again to hopefully finish the job. for page_index in get_pages::(flash_range.clone(), 0) { if matches!( - get_page_state(flash, flash_range.clone(), page_index).await, + get_page_state( + flash, + flash_range.clone(), + NoCache::new().inner(), + page_index + ) + .await, Err(Error::Corrupted { .. 
}) ) { - flash - .erase( - calculate_page_address::(flash_range.clone(), page_index), - calculate_page_end_address::(flash_range.clone(), page_index), - ) - .await - .map_err(|e| Error::Storage { - value: e, - #[cfg(feature = "_test")] - backtrace: std::backtrace::Backtrace::capture(), - })?; + open_page( + flash, + flash_range.clone(), + NoCache::new().inner(), + page_index, + ) + .await?; } } @@ -77,11 +85,12 @@ async fn try_general_repair( async fn find_first_page( flash: &mut S, flash_range: Range, + cache: &mut Cache, starting_page_index: usize, page_state: PageState, ) -> Result, Error> { for page_index in get_pages::(flash_range.clone(), starting_page_index) { - if page_state == get_page_state::(flash, flash_range.clone(), page_index).await? { + if page_state == get_page_state::(flash, flash_range.clone(), cache, page_index).await? { return Ok(Some(page_index)); } } @@ -141,8 +150,13 @@ const MARKER: u8 = 0; async fn get_page_state( flash: &mut S, flash_range: Range, + cache: &mut Cache, page_index: usize, ) -> Result> { + if let Some(cached_page_state) = cache.get_page_state(page_index) { + return Ok(cached_page_state); + } + let page_address = calculate_page_address::(flash_range, page_index); /// We only care about the data in the first byte to aid shutdown/cancellation. /// But we also don't want it to be too too definitive because we want to survive the occasional bitflip. 
@@ -181,30 +195,64 @@ async fn get_page_state( .sum::() >= HALF_MARKER_BITS; - match (start_marked, end_marked) { - (true, true) => Ok(PageState::Closed), - (true, false) => Ok(PageState::PartialOpen), + let discovered_state = match (start_marked, end_marked) { + (true, true) => PageState::Closed, + (true, false) => PageState::PartialOpen, // Probably an interrupted erase - (false, true) => Err(Error::Corrupted { + (false, true) => { + return Err(Error::Corrupted { + #[cfg(feature = "_test")] + backtrace: std::backtrace::Backtrace::capture(), + }) + } + (false, false) => PageState::Open, + }; + + cache.notice_page_state(page_index, discovered_state); + + Ok(discovered_state) +} + +/// Erase the page to open it again +async fn open_page( + flash: &mut S, + flash_range: Range, + cache: &mut Cache, + page_index: usize, +) -> Result<(), Error> { + cache.notice_page_state(page_index, PageState::Open); + + flash + .erase( + calculate_page_address::(flash_range.clone(), page_index), + calculate_page_end_address::(flash_range.clone(), page_index), + ) + .await + .map_err(|e| Error::Storage { + value: e, #[cfg(feature = "_test")] backtrace: std::backtrace::Backtrace::capture(), - }), - (false, false) => Ok(PageState::Open), - } + })?; + + Ok(()) } /// Fully closes a page by writing both the start and end marker async fn close_page( flash: &mut S, flash_range: Range, + cache: &mut Cache, page_index: usize, ) -> Result<(), Error> { - let current_state = partial_close_page::(flash, flash_range.clone(), page_index).await?; + let current_state = + partial_close_page::(flash, flash_range.clone(), cache, page_index).await?; if current_state != PageState::PartialOpen { return Ok(()); } + cache.notice_page_state(page_index, PageState::Closed); + let buffer = AlignedBuf([MARKER; MAX_WORD_SIZE]); // Close the end marker flash @@ -226,14 +274,23 @@ async fn close_page( async fn partial_close_page( flash: &mut S, flash_range: Range, + cache: &mut Cache, page_index: usize, ) -> Result> { 
- let current_state = get_page_state::(flash, flash_range.clone(), page_index).await?; + let current_state = get_page_state::(flash, flash_range.clone(), cache, page_index).await?; if current_state != PageState::Open { return Ok(current_state); } + let new_state = match current_state { + PageState::Closed => PageState::Closed, + PageState::PartialOpen => PageState::PartialOpen, + PageState::Open => PageState::PartialOpen, + }; + + cache.notice_page_state(page_index, new_state); + let buffer = AlignedBuf([MARKER; MAX_WORD_SIZE]); // Close the start marker flash @@ -248,11 +305,7 @@ async fn partial_close_page( backtrace: std::backtrace::Backtrace::capture(), })?; - Ok(match current_state { - PageState::Closed => PageState::Closed, - PageState::PartialOpen => PageState::PartialOpen, - PageState::Open => PageState::PartialOpen, - }) + Ok(new_state) } /// The state of a page @@ -380,6 +433,7 @@ impl NorFlashExt for S { #[cfg(test)] mod tests { use super::*; + use crate::cache::PrivateCacheImpl; use futures_test::test; type MockFlash = mock_flash::MockFlashBase<4, 4, 64>; @@ -423,70 +477,136 @@ mod tests { .unwrap(); assert_eq!( - find_first_page(&mut flash, 0x000..0x400, 0, PageState::Open) - .await - .unwrap(), + find_first_page( + &mut flash, + 0x000..0x400, + cache::NoCache::new().inner(), + 0, + PageState::Open + ) + .await + .unwrap(), Some(3) ); assert_eq!( - find_first_page(&mut flash, 0x000..0x400, 0, PageState::PartialOpen) - .await - .unwrap(), + find_first_page( + &mut flash, + 0x000..0x400, + cache::NoCache::new().inner(), + 0, + PageState::PartialOpen + ) + .await + .unwrap(), Some(2) ); assert_eq!( - find_first_page(&mut flash, 0x000..0x400, 1, PageState::PartialOpen) - .await - .unwrap(), + find_first_page( + &mut flash, + 0x000..0x400, + cache::NoCache::new().inner(), + 1, + PageState::PartialOpen + ) + .await + .unwrap(), Some(2) ); assert_eq!( - find_first_page(&mut flash, 0x000..0x400, 2, PageState::PartialOpen) - .await - .unwrap(), + 
find_first_page( + &mut flash, + 0x000..0x400, + cache::NoCache::new().inner(), + 2, + PageState::PartialOpen + ) + .await + .unwrap(), Some(2) ); assert_eq!( - find_first_page(&mut flash, 0x000..0x400, 3, PageState::Open) - .await - .unwrap(), + find_first_page( + &mut flash, + 0x000..0x400, + cache::NoCache::new().inner(), + 3, + PageState::Open + ) + .await + .unwrap(), Some(3) ); assert_eq!( - find_first_page(&mut flash, 0x000..0x200, 0, PageState::PartialOpen) - .await - .unwrap(), + find_first_page( + &mut flash, + 0x000..0x200, + cache::NoCache::new().inner(), + 0, + PageState::PartialOpen + ) + .await + .unwrap(), None ); assert_eq!( - find_first_page(&mut flash, 0x000..0x400, 0, PageState::Closed) - .await - .unwrap(), + find_first_page( + &mut flash, + 0x000..0x400, + cache::NoCache::new().inner(), + 0, + PageState::Closed + ) + .await + .unwrap(), Some(0) ); assert_eq!( - find_first_page(&mut flash, 0x000..0x400, 1, PageState::Closed) - .await - .unwrap(), + find_first_page( + &mut flash, + 0x000..0x400, + cache::NoCache::new().inner(), + 1, + PageState::Closed + ) + .await + .unwrap(), Some(1) ); assert_eq!( - find_first_page(&mut flash, 0x000..0x400, 2, PageState::Closed) - .await - .unwrap(), + find_first_page( + &mut flash, + 0x000..0x400, + cache::NoCache::new().inner(), + 2, + PageState::Closed + ) + .await + .unwrap(), Some(0) ); assert_eq!( - find_first_page(&mut flash, 0x000..0x400, 3, PageState::Closed) - .await - .unwrap(), + find_first_page( + &mut flash, + 0x000..0x400, + cache::NoCache::new().inner(), + 3, + PageState::Closed + ) + .await + .unwrap(), Some(0) ); assert_eq!( - find_first_page(&mut flash, 0x200..0x400, 0, PageState::Closed) - .await - .unwrap(), + find_first_page( + &mut flash, + 0x200..0x400, + cache::NoCache::new().inner(), + 0, + PageState::Closed + ) + .await + .unwrap(), None ); } diff --git a/src/map.rs b/src/map.rs index 1846c36..117fb74 100644 --- a/src/map.rs +++ b/src/map.rs @@ -9,12 +9,16 @@ //! //! ```rust //! 
# use sequential_storage::map::{store_item, fetch_item, StorageItem}; +//! # use sequential_storage::cache::NoCache; //! # use mock_flash::MockFlashBase; //! # use futures::executor::block_on; //! # type Flash = MockFlashBase<10, 1, 4096>; //! # mod mock_flash { //! # include!("mock_flash.rs"); //! # } +//! # fn init_flash() -> Flash { +//! # Flash::new(mock_flash::WriteCountCheck::Twice, None, false) +//! # } //! // We create the type we want to store in this part of flash. //! // It itself must contain the key and the value. //! // On this part of flash, we must only call the functions using this type. @@ -62,7 +66,7 @@ //! //! # block_on(async { //! // Initialize the flash. This can be internal or external -//! let mut flash = Flash::default(); +//! let mut flash = init_flash(); //! // These are the flash addresses in which the crate will operate. //! // The crate will not read, write or erase outside of this range. //! let flash_range = 0x1000..0x3000; @@ -70,7 +74,7 @@ //! // It must be big enough to serialize the biggest value of your storage type in, //! // rounded up to to word alignment of the flash. Some kinds of flash may require //! // this buffer to be aligned in RAM as well. -//! let mut data_buffer = [0; 100]; +//! let mut data_buffer = [0; 128]; //! //! // We can fetch an item from the flash. //! // Nothing is stored in it yet, so it will return None. @@ -79,6 +83,7 @@ //! fetch_item::( //! &mut flash, //! flash_range.clone(), +//! NoCache::new(), //! &mut data_buffer, //! 42, //! ).await.unwrap(), @@ -90,6 +95,7 @@ //! store_item::( //! &mut flash, //! flash_range.clone(), +//! NoCache::new(), //! &mut data_buffer, //! &MyCustomType { key: 42, data: 104729 }, //! ).await.unwrap(); @@ -100,6 +106,7 @@ //! fetch_item::( //! &mut flash, //! flash_range.clone(), +//! NoCache::new(), //! &mut data_buffer, //! 42, //! ).await.unwrap(), @@ -108,7 +115,12 @@ //! # }); //! 
``` -use crate::item::{find_next_free_item_spot, Item, ItemHeader, ItemIter}; +use crate::{ + cache::Cache, + item::{find_next_free_item_spot, Item, ItemHeader, ItemIter}, +}; + +use self::cache::{CacheImpl, PrivateCacheImpl}; use super::*; @@ -125,11 +137,12 @@ use super::*; pub async fn fetch_item( flash: &mut S, flash_range: Range, + mut cache: impl CacheImpl, data_buffer: &mut [u8], search_key: I::Key, ) -> Result, MapError> { Ok( - fetch_item_with_location(flash, flash_range, data_buffer, search_key) + fetch_item_with_location(flash, flash_range, cache.inner(), data_buffer, search_key) .await? .map(|(item, _, _)| item), ) @@ -140,6 +153,7 @@ pub async fn fetch_item( async fn fetch_item_with_location( flash: &mut S, flash_range: Range, + cache: &mut Cache, data_buffer: &mut [u8], search_key: I::Key, ) -> Result, MapError> { @@ -150,21 +164,22 @@ async fn fetch_item_with_location( assert!(S::ERASE_SIZE >= S::WORD_SIZE * 3); assert!(S::WORD_SIZE <= MAX_WORD_SIZE); + if cache.is_dirty() { + cache.invalidate_cache_state(); + } + // We need to find the page we were last using. This should be the only partial open page. let mut last_used_page = - find_first_page(flash, flash_range.clone(), 0, PageState::PartialOpen).await?; - - #[cfg(feature = "defmt")] - defmt::trace!("Fetch item, last used page: {}", last_used_page); + find_first_page(flash, flash_range.clone(), cache, 0, PageState::PartialOpen).await?; if last_used_page.is_none() { // In the event that all pages are still open or the last used page was just closed, we search for the first open page. // If the page one before that is closed, then that's the last used page. if let Some(first_open_page) = - find_first_page(flash, flash_range.clone(), 0, PageState::Open).await? + find_first_page(flash, flash_range.clone(), cache, 0, PageState::Open).await? 
{ let previous_page = previous_page::(flash_range.clone(), first_open_page); - if get_page_state(flash, flash_range.clone(), previous_page) + if get_page_state(flash, flash_range.clone(), cache, previous_page) .await? .is_closed() { @@ -172,6 +187,7 @@ async fn fetch_item_with_location( } else { // The page before the open page is not closed, so it must be open. // This means that all pages are open and that we don't have any items yet. + cache.unmark_dirty(); return Ok(None); } } else { @@ -218,14 +234,18 @@ async fn fetch_item_with_location( // We have not found the item. We've got to look in the previous page, but only if that page is closed and contains data. let previous_page = previous_page::(flash_range.clone(), current_page_to_check); - if get_page_state(flash, flash_range.clone(), previous_page).await? != PageState::Closed { + if get_page_state(flash, flash_range.clone(), cache, previous_page).await? + != PageState::Closed + { // We've looked through all the pages with data and couldn't find the item + cache.unmark_dirty(); return Ok(None); } current_page_to_check = previous_page; } + cache.unmark_dirty(); Ok(newest_found_item) } @@ -240,38 +260,41 @@ async fn fetch_item_with_location( pub async fn store_item( flash: &mut S, flash_range: Range, + mut cache: impl CacheImpl, data_buffer: &mut [u8], item: &I, ) -> Result<(), MapError> { assert_eq!(flash_range.start % S::ERASE_SIZE as u32, 0); assert_eq!(flash_range.end % S::ERASE_SIZE as u32, 0); - assert!(flash_range.len() / S::ERASE_SIZE >= 2); + assert!(flash_range.end - flash_range.start >= S::ERASE_SIZE as u32 * 2); assert!(S::ERASE_SIZE >= S::WORD_SIZE * 3); assert!(S::WORD_SIZE <= MAX_WORD_SIZE); + let cache = cache.inner(); + + if cache.is_dirty() { + cache.invalidate_cache_state(); + } + let mut recursion_level = 0; loop { - #[cfg(feature = "defmt")] - defmt::trace!("Store item inner. 
Recursion: {}", recursion_level); - - // Check if we're in an infinite recursion which happens when + // Check if we're in an infinite recursion which happens when we don't have enough space to store the new data if recursion_level == get_pages::(flash_range.clone(), 0).count() { + cache.unmark_dirty(); return Err(MapError::FullStorage); } // If there is a partial open page, we try to write in that first if there is enough space let next_page_to_use = if let Some(partial_open_page) = - find_first_page(flash, flash_range.clone(), 0, PageState::PartialOpen).await? + find_first_page(flash, flash_range.clone(), cache, 0, PageState::PartialOpen).await? { - #[cfg(feature = "defmt")] - defmt::trace!("Partial open page found: {}", partial_open_page); - // We found a partial open page, but at this point it's relatively cheap to do a consistency check if !get_page_state( flash, flash_range.clone(), + cache, next_page::(flash_range.clone(), partial_open_page), ) .await? @@ -311,20 +334,12 @@ pub async fn store_item( Item::write_new(flash, free_spot_address, &data_buffer[..item_data_length]) .await?; - #[cfg(feature = "defmt")] - defmt::trace!("Item has been written ok"); - + cache.unmark_dirty(); return Ok(()); } None => { - #[cfg(feature = "defmt")] - defmt::trace!( - "Partial open page is too small. Closing it now: {}", - partial_open_page - ); - // The item doesn't fit here, so we need to close this page and move to the next - close_page(flash, flash_range.clone(), partial_open_page).await?; + close_page(flash, flash_range.clone(), cache, partial_open_page).await?; Some(next_page::(flash_range.clone(), partial_open_page)) } } @@ -337,13 +352,10 @@ pub async fn store_item( // The new buffer page has to be emptied if it was closed. // If there was no partial page, we just use the first open page. 
- #[cfg(feature = "defmt")] - defmt::trace!("Next page to use: {}", next_page_to_use); - match next_page_to_use { Some(next_page_to_use) => { let next_page_state = - get_page_state(flash, flash_range.clone(), next_page_to_use).await?; + get_page_state(flash, flash_range.clone(), cache, next_page_to_use).await?; if !next_page_state.is_open() { // What was the previous buffer page was not open... @@ -356,16 +368,17 @@ pub async fn store_item( // Since we're gonna write data here, let's already partially close the page // This could be done after moving the data, but this is more robust in the // face of shutdowns and cancellations - partial_close_page(flash, flash_range.clone(), next_page_to_use).await?; + partial_close_page(flash, flash_range.clone(), cache, next_page_to_use).await?; let next_buffer_page = next_page::(flash_range.clone(), next_page_to_use); let next_buffer_page_state = - get_page_state(flash, flash_range.clone(), next_buffer_page).await?; + get_page_state(flash, flash_range.clone(), cache, next_buffer_page).await?; if !next_buffer_page_state.is_open() { migrate_items::( flash, flash_range.clone(), + cache, data_buffer, next_buffer_page, next_page_to_use, @@ -376,14 +389,11 @@ pub async fn store_item( None => { // There's no partial open page, so we just gotta turn the first open page into a partial open one let first_open_page = - match find_first_page(flash, flash_range.clone(), 0, PageState::Open).await? { + match find_first_page(flash, flash_range.clone(), cache, 0, PageState::Open) + .await? + { Some(first_open_page) => first_open_page, None => { - #[cfg(feature = "defmt")] - defmt::error!( - "No open pages found for sequential storage in the range: {}", - flash_range - ); // Uh oh, no open pages. // Something has gone wrong. // We should never get here. 
@@ -394,7 +404,7 @@ pub async fn store_item( } }; - partial_close_page(flash, flash_range.clone(), first_open_page).await?; + partial_close_page(flash, flash_range.clone(), cache, first_open_page).await?; } } @@ -511,6 +521,7 @@ impl PartialEq for MapError { async fn migrate_items( flash: &mut S, flash_range: Range, + cache: &mut Cache, data_buffer: &mut [u8], source_page: usize, target_page: usize, @@ -531,7 +542,8 @@ async fn migrate_items( // Search for the newest item with the key we found let Some((_, found_address, _)) = - fetch_item_with_location::(flash, flash_range.clone(), data_buffer, key).await? + fetch_item_with_location::(flash, flash_range.clone(), cache, data_buffer, key) + .await? else { // We couldn't even find our own item? return Err(MapError::Corrupted { @@ -552,17 +564,7 @@ async fn migrate_items( } } - flash - .erase( - calculate_page_address::(flash_range.clone(), source_page), - calculate_page_end_address::(flash_range.clone(), source_page), - ) - .await - .map_err(|e| MapError::Storage { - value: e, - #[cfg(feature = "_test")] - backtrace: std::backtrace::Backtrace::capture(), - })?; + open_page(flash, flash_range.clone(), cache, source_page).await?; Ok(()) } @@ -580,39 +582,60 @@ async fn migrate_items( pub async fn try_repair( flash: &mut S, flash_range: Range, + mut cache: impl CacheImpl, data_buffer: &mut [u8], ) -> Result<(), MapError> { + let cache = cache.inner(); + + cache.invalidate_cache_state(); + #[allow(dropping_references)] + drop(cache); + crate::try_general_repair(flash, flash_range.clone()).await?; // Let's check if we corrupted in the middle of a migration - if let Some(partial_open_page) = - find_first_page(flash, flash_range.clone(), 0, PageState::PartialOpen).await? + if let Some(partial_open_page) = find_first_page( + flash, + flash_range.clone(), + cache::NoCache::new().inner(), + 0, + PageState::PartialOpen, + ) + .await? 
{ let buffer_page = next_page::(flash_range.clone(), partial_open_page); - if !get_page_state(flash, flash_range.clone(), buffer_page) - .await? - .is_open() + if !get_page_state( + flash, + flash_range.clone(), + cache::NoCache::new().inner(), + buffer_page, + ) + .await? + .is_open() { // Yes, the migration got interrupted. Let's redo it. // To do that, we erase the partial open page first because it contains incomplete data. - flash - .erase( - calculate_page_address::(flash_range.clone(), partial_open_page), - calculate_page_end_address::(flash_range.clone(), partial_open_page), - ) - .await - .map_err(|e| MapError::Storage { - value: e, - #[cfg(feature = "_test")] - backtrace: std::backtrace::Backtrace::capture(), - })?; + open_page( + flash, + flash_range.clone(), + cache::NoCache::new().inner(), + partial_open_page, + ) + .await?; // Then partially close it again - partial_close_page(flash, flash_range.clone(), partial_open_page).await?; + partial_close_page( + flash, + flash_range.clone(), + cache::NoCache::new().inner(), + partial_open_page, + ) + .await?; migrate_items::( flash, flash_range.clone(), + cache::NoCache::new().inner(), data_buffer, buffer_page, partial_open_page, @@ -707,21 +730,32 @@ mod tests { let mut data_buffer = AlignedBuf([0; 128]); - let item = - fetch_item::(&mut flash, flash_range.clone(), &mut data_buffer, 0) - .await - .unwrap(); + let item = fetch_item::( + &mut flash, + flash_range.clone(), + cache::NoCache::new(), + &mut data_buffer, + 0, + ) + .await + .unwrap(); assert_eq!(item, None); - let item = - fetch_item::(&mut flash, flash_range.clone(), &mut data_buffer, 60) - .await - .unwrap(); + let item = fetch_item::( + &mut flash, + flash_range.clone(), + cache::NoCache::new(), + &mut data_buffer, + 60, + ) + .await + .unwrap(); assert_eq!(item, None); let item = fetch_item::( &mut flash, flash_range.clone(), + cache::NoCache::new(), &mut data_buffer, 0xFF, ) @@ -732,6 +766,7 @@ mod tests { store_item::<_, _>( &mut flash, 
flash_range.clone(), + cache::NoCache::new(), &mut data_buffer, &MockStorageItem { key: 0, @@ -743,6 +778,7 @@ mod tests { store_item::<_, _>( &mut flash, flash_range.clone(), + cache::NoCache::new(), &mut data_buffer, &MockStorageItem { key: 0, @@ -752,17 +788,23 @@ mod tests { .await .unwrap(); - let item = - fetch_item::(&mut flash, flash_range.clone(), &mut data_buffer, 0) - .await - .unwrap() - .unwrap(); + let item = fetch_item::( + &mut flash, + flash_range.clone(), + cache::NoCache::new(), + &mut data_buffer, + 0, + ) + .await + .unwrap() + .unwrap(); assert_eq!(item.key, 0); assert_eq!(item.value, vec![5, 6]); store_item::<_, _>( &mut flash, flash_range.clone(), + cache::NoCache::new(), &mut data_buffer, &MockStorageItem { key: 1, @@ -772,19 +814,29 @@ mod tests { .await .unwrap(); - let item = - fetch_item::(&mut flash, flash_range.clone(), &mut data_buffer, 0) - .await - .unwrap() - .unwrap(); + let item = fetch_item::( + &mut flash, + flash_range.clone(), + cache::NoCache::new(), + &mut data_buffer, + 0, + ) + .await + .unwrap() + .unwrap(); assert_eq!(item.key, 0); assert_eq!(item.value, vec![5, 6]); - let item = - fetch_item::(&mut flash, flash_range.clone(), &mut data_buffer, 1) - .await - .unwrap() - .unwrap(); + let item = fetch_item::( + &mut flash, + flash_range.clone(), + cache::NoCache::new(), + &mut data_buffer, + 1, + ) + .await + .unwrap() + .unwrap(); assert_eq!(item.key, 1); assert_eq!(item.value, vec![2, 2, 2, 2, 2, 2]); @@ -792,6 +844,7 @@ mod tests { store_item::<_, _>( &mut flash, flash_range.clone(), + cache::NoCache::new(), &mut data_buffer, &MockStorageItem { key: (index % 10) as u8, @@ -806,6 +859,7 @@ mod tests { let item = fetch_item::( &mut flash, flash_range.clone(), + cache::NoCache::new(), &mut data_buffer, i, ) @@ -820,6 +874,7 @@ mod tests { store_item::<_, _>( &mut flash, flash_range.clone(), + cache::NoCache::new(), &mut data_buffer, &MockStorageItem { key: 11, @@ -834,6 +889,7 @@ mod tests { let item = fetch_item::( &mut 
flash, flash_range.clone(), + cache::NoCache::new(), &mut data_buffer, i, ) @@ -864,15 +920,22 @@ mod tests { }; println!("Storing {item:?}"); - store_item::<_, _>(&mut tiny_flash, 0x00..0x40, &mut data_buffer, &item) - .await - .unwrap(); + store_item::<_, _>( + &mut tiny_flash, + 0x00..0x40, + cache::NoCache::new(), + &mut data_buffer, + &item, + ) + .await + .unwrap(); } assert_eq!( store_item::<_, _>( &mut tiny_flash, 0x00..0x40, + cache::NoCache::new(), &mut data_buffer, &MockStorageItem { key: UPPER_BOUND, @@ -887,6 +950,7 @@ mod tests { let item = fetch_item::( &mut tiny_flash, 0x00..0x40, + cache::NoCache::new(), &mut data_buffer, i as u8, ) @@ -914,15 +978,22 @@ mod tests { }; println!("Storing {item:?}"); - store_item::<_, _>(&mut big_flash, 0x0000..0x1000, &mut data_buffer, &item) - .await - .unwrap(); + store_item::<_, _>( + &mut big_flash, + 0x0000..0x1000, + cache::NoCache::new(), + &mut data_buffer, + &item, + ) + .await + .unwrap(); } assert_eq!( store_item::<_, _>( &mut big_flash, 0x0000..0x1000, + cache::NoCache::new(), &mut data_buffer, &MockStorageItem { key: UPPER_BOUND, @@ -937,6 +1008,7 @@ mod tests { let item = fetch_item::( &mut big_flash, 0x0000..0x1000, + cache::NoCache::new(), &mut data_buffer, i as u8, ) @@ -966,9 +1038,15 @@ mod tests { value: vec![i as u8; LENGHT_PER_KEY[i]], }; - store_item::<_, _>(&mut flash, 0x0000..0x4000, &mut data_buffer, &item) - .await - .unwrap(); + store_item::<_, _>( + &mut flash, + 0x0000..0x4000, + cache::NoCache::new(), + &mut data_buffer, + &item, + ) + .await + .unwrap(); } } @@ -976,6 +1054,7 @@ mod tests { let item = fetch_item::( &mut flash, 0x0000..0x4000, + cache::NoCache::new(), &mut data_buffer, i as u8, ) diff --git a/src/mock_flash.rs b/src/mock_flash.rs index d5f575a..a711081 100644 --- a/src/mock_flash.rs +++ b/src/mock_flash.rs @@ -31,13 +31,15 @@ pub struct MockFlashBase, + /// When true, write buffers have to be aligned + pub alignment_check: bool, } impl Default for MockFlashBase { fn 
default() -> Self { - Self::new(WriteCountCheck::OnceOnly, None) + Self::new(WriteCountCheck::OnceOnly, None, true) } } @@ -53,7 +55,11 @@ impl pub const FULL_FLASH_RANGE: Range = 0..(PAGES * PAGE_WORDS * BYTES_PER_WORD) as u32; /// Create a new flash instance. - pub fn new(write_count_check: WriteCountCheck, bytes_until_shutoff: Option) -> Self { + pub fn new( + write_count_check: WriteCountCheck, + bytes_until_shutoff: Option, + alignment_check: bool, + ) -> Self { Self { writable: vec![T; Self::CAPACITY_WORDS], data: vec![u8::MAX; Self::CAPACITY_BYTES], @@ -62,6 +68,7 @@ impl writes: 0, write_count_check, bytes_until_shutoff, + alignment_check, } } @@ -105,6 +112,7 @@ impl #[cfg(feature = "_test")] /// Print all items in flash to the returned string pub fn print_items(&mut self) -> String { + use crate::cache::{NoCache, PrivateCacheImpl}; use crate::NorFlashExt; use futures::executor::block_on; use std::fmt::Write; @@ -123,6 +131,7 @@ impl match block_on(crate::get_page_state( self, Self::FULL_FLASH_RANGE, + NoCache::new().inner(), page_index )) { Ok(value) => format!("{value:?}"), @@ -288,7 +297,7 @@ impl N // Check alignment. Some flash types are strict about the alignment of the input buffer. This ensures // that the mock flash is also strict to catch bugs and avoid regressions. - if bytes.as_ptr() as usize % 4 != 0 { + if self.alignment_check && bytes.as_ptr() as usize % 4 != 0 { panic!("write buffer must be aligned to 4 bytes"); } diff --git a/src/queue.rs b/src/queue.rs index 8021f1a..cc00177 100644 --- a/src/queue.rs +++ b/src/queue.rs @@ -4,6 +4,7 @@ //! //! ```rust //! # use sequential_storage::queue::{push, peek, pop}; +//! # use sequential_storage::cache::NoCache; //! # use mock_flash::MockFlashBase; //! # use futures::executor::block_on; //! # type Flash = MockFlashBase<10, 1, 4096>; @@ -12,7 +13,7 @@ //! # } //! # //! # fn init_flash() -> Flash { -//! # Flash::new(mock_flash::WriteCountCheck::Twice, None) +//! 
# Flash::new(mock_flash::WriteCountCheck::Twice, None, false) //! # } //! # //! # block_on(async { @@ -24,37 +25,42 @@ //! let flash_range = 0x1000..0x3000; //! // We need to give the crate a buffer to work with. //! // It must be big enough to serialize the biggest value of your storage type in. -//! let mut data_buffer = [0; 100]; +//! let mut data_buffer = [0; 128]; //! //! let my_data = [10, 47, 29]; //! //! // We can push some data to the queue -//! push(&mut flash, flash_range.clone(), &my_data, false).await.unwrap(); +//! push(&mut flash, flash_range.clone(), NoCache::new(), &my_data, false).await.unwrap(); //! //! // We can peek at the oldest data //! //! assert_eq!( -//! &peek(&mut flash, flash_range.clone(), &mut data_buffer).await.unwrap().unwrap()[..], +//! &peek(&mut flash, flash_range.clone(), NoCache::new(), &mut data_buffer).await.unwrap().unwrap()[..], //! &my_data[..] //! ); //! //! // With popping we get back the oldest data, but that data is now also removed //! //! assert_eq!( -//! &pop(&mut flash, flash_range.clone(), &mut data_buffer).await.unwrap().unwrap()[..], +//! &pop(&mut flash, flash_range.clone(), NoCache::new(), &mut data_buffer).await.unwrap().unwrap()[..], //! &my_data[..] //! ); //! //! // If we pop again, we find there's no data anymore //! //! assert_eq!( -//! pop(&mut flash, flash_range.clone(), &mut data_buffer).await, +//! pop(&mut flash, flash_range.clone(), NoCache::new(), &mut data_buffer).await, //! Ok(None) //! ); //! # }); //! 
``` -use crate::item::{find_next_free_item_spot, is_page_empty, Item, ItemHeader, ItemHeaderIter}; +use crate::{ + cache::Cache, + item::{find_next_free_item_spot, is_page_empty, Item, ItemHeader, ItemHeaderIter}, +}; + +use self::cache::{CacheImpl, PageStatesCache}; use super::*; use embedded_storage_async::nor_flash::MultiwriteNorFlash; @@ -70,6 +76,7 @@ use embedded_storage_async::nor_flash::MultiwriteNorFlash; pub async fn push( flash: &mut S, flash_range: Range, + mut cache: impl CacheImpl, data: &[u8], allow_overwrite_old_data: bool, ) -> Result<(), Error> { @@ -79,22 +86,29 @@ pub async fn push( assert!(S::ERASE_SIZE >= S::WORD_SIZE * 4); assert!(S::WORD_SIZE <= MAX_WORD_SIZE); + let cache = cache.inner(); + + if cache.is_dirty() { + cache.invalidate_cache_state(); + } + // Data must fit in a single page if data.len() > ItemHeader::available_data_bytes::((S::ERASE_SIZE - S::WORD_SIZE * 2) as u32).unwrap() as usize { + cache.unmark_dirty(); return Err(Error::BufferTooBig); } - let current_page = find_youngest_page(flash, flash_range.clone()).await?; + let current_page = find_youngest_page(flash, flash_range.clone(), cache).await?; let page_data_start_address = calculate_page_address::(flash_range.clone(), current_page) + S::WORD_SIZE as u32; let page_data_end_address = calculate_page_end_address::(flash_range.clone(), current_page) - S::WORD_SIZE as u32; - partial_close_page(flash, flash_range.clone(), current_page).await?; + partial_close_page(flash, flash_range.clone(), cache, current_page).await?; // Find the last item on the page so we know where we need to write @@ -109,10 +123,10 @@ pub async fn push( if next_address.is_none() { // No cap left on this page, move to the next page let next_page = next_page::(flash_range.clone(), current_page); - match get_page_state(flash, flash_range.clone(), next_page).await? { + match get_page_state(flash, flash_range.clone(), cache, next_page).await? 
{ PageState::Open => { - close_page(flash, flash_range.clone(), current_page).await?; - partial_close_page(flash, flash_range.clone(), next_page).await?; + close_page(flash, flash_range.clone(), cache, current_page).await?; + partial_close_page(flash, flash_range.clone(), cache, next_page).await?; next_address = Some( calculate_page_address::(flash_range.clone(), next_page) + S::WORD_SIZE as u32, @@ -124,31 +138,20 @@ pub async fn push( + S::WORD_SIZE as u32; if !allow_overwrite_old_data - && !is_page_empty(flash, flash_range.clone(), next_page, Some(state)).await? + && !is_page_empty(flash, flash_range.clone(), cache, next_page, Some(state)) + .await? { + cache.unmark_dirty(); return Err(Error::FullStorage); } - flash - .erase( - calculate_page_address::(flash_range.clone(), next_page), - calculate_page_end_address::(flash_range.clone(), next_page), - ) - .await - .map_err(|e| Error::Storage { - value: e, - #[cfg(feature = "_test")] - backtrace: std::backtrace::Backtrace::capture(), - })?; - - close_page(flash, flash_range.clone(), current_page).await?; - partial_close_page(flash, flash_range.clone(), next_page).await?; + open_page(flash, flash_range.clone(), cache, next_page).await?; + close_page(flash, flash_range.clone(), cache, current_page).await?; + partial_close_page(flash, flash_range.clone(), cache, next_page).await?; next_address = Some(next_page_data_start_address); } PageState::PartialOpen => { // This should never happen - #[cfg(feature = "defmt")] - defmt::error!("Corrupted: A we expected an open or closed page, but found a partial open page"); return Err(Error::Corrupted { #[cfg(feature = "_test")] backtrace: std::backtrace::Backtrace::capture(), @@ -159,6 +162,7 @@ pub async fn push( Item::write_new(flash, next_address.unwrap(), data).await?; + cache.unmark_dirty(); Ok(()) } @@ -167,12 +171,13 @@ pub async fn push( /// If you also want to remove the data use [pop_many]. /// /// Returns an iterator-like type that can be used to peek into the data. 
-pub async fn peek_many( +pub async fn peek_many( flash: &mut S, flash_range: Range, -) -> Result, Error> { + cache: CI, +) -> Result, Error> { Ok(PeekIterator { - iter: QueueIterator::new(flash, flash_range).await?, + iter: QueueIterator::new(flash, flash_range, cache).await?, }) } @@ -189,9 +194,13 @@ pub async fn peek_many( pub async fn peek<'d, S: NorFlash>( flash: &mut S, flash_range: Range, + cache: impl CacheImpl, data_buffer: &'d mut [u8], ) -> Result, Error> { - peek_many(flash, flash_range).await?.next(data_buffer).await + peek_many(flash, flash_range, cache) + .await? + .next(data_buffer) + .await } /// Pop the data from oldest to newest. @@ -199,12 +208,13 @@ pub async fn peek<'d, S: NorFlash>( /// If you don't want to remove the data use [peek_many]. /// /// Returns an iterator-like type that can be used to pop the data. -pub async fn pop_many( +pub async fn pop_many( flash: &mut S, flash_range: Range, -) -> Result, Error> { + cache: CI, +) -> Result, Error> { Ok(PopIterator { - iter: QueueIterator::new(flash, flash_range).await?, + iter: QueueIterator::new(flash, flash_range, cache).await?, }) } @@ -221,18 +231,22 @@ pub async fn pop_many( pub async fn pop<'d, S: MultiwriteNorFlash>( flash: &mut S, flash_range: Range, + cache: impl CacheImpl, data_buffer: &'d mut [u8], ) -> Result, Error> { - pop_many(flash, flash_range).await?.next(data_buffer).await + pop_many(flash, flash_range, cache) + .await? + .next(data_buffer) + .await } /// Iterator for pop'ing elements in the queue. #[derive(Debug)] -pub struct PopIterator<'d, S: MultiwriteNorFlash> { - iter: QueueIterator<'d, S>, +pub struct PopIterator<'d, S: MultiwriteNorFlash, CI: CacheImpl> { + iter: QueueIterator<'d, S, CI>, } -impl<'d, S: MultiwriteNorFlash> PopIterator<'d, S> { +impl<'d, S: MultiwriteNorFlash, CI: CacheImpl> PopIterator<'d, S, CI> { /// Pop the next data. /// /// The data is written to the given `data_buffer` and the part that was written is returned. 
@@ -245,6 +259,10 @@ impl<'d, S: MultiwriteNorFlash> PopIterator<'d, S> { &mut self, data_buffer: &'m mut [u8], ) -> Result, Error> { + if self.iter.cache.inner().is_dirty() { + self.iter.cache.inner().invalidate_cache_state(); + } + let reset_point = self.iter.create_reset_point(); if let Some((item, item_address)) = self.iter.next(data_buffer).await? { @@ -252,13 +270,17 @@ impl<'d, S: MultiwriteNorFlash> PopIterator<'d, S> { let ret = &mut data_buffer[..header.length as usize]; match header.erase_data(self.iter.flash, item_address).await { - Ok(_) => Ok(Some(ret)), + Ok(_) => { + self.iter.cache.inner().unmark_dirty(); + Ok(Some(ret)) + } Err(e) => { self.iter.recover_from_reset_point(reset_point); Err(e) } } } else { + self.iter.cache.inner().unmark_dirty(); Ok(None) } } @@ -266,11 +288,11 @@ impl<'d, S: MultiwriteNorFlash> PopIterator<'d, S> { /// Iterator for peek'ing elements in the queue. #[derive(Debug)] -pub struct PeekIterator<'d, S: NorFlash> { - iter: QueueIterator<'d, S>, +pub struct PeekIterator<'d, S: NorFlash, CI: CacheImpl> { + iter: QueueIterator<'d, S, CI>, } -impl<'d, S: NorFlash> PeekIterator<'d, S> { +impl<'d, S: NorFlash, CI: CacheImpl> PeekIterator<'d, S, CI> { /// Peek at the next data. /// /// The data is written to the given `data_buffer` and the part that was written is returned. @@ -283,6 +305,10 @@ impl<'d, S: NorFlash> PeekIterator<'d, S> { &mut self, data_buffer: &'m mut [u8], ) -> Result, Error> { + if self.iter.cache.inner().is_dirty() { + self.iter.cache.inner().invalidate_cache_state(); + } + Ok(self.iter.next(data_buffer).await?.map(|(item, _)| { let (header, data_buffer) = item.destruct(); &mut data_buffer[..header.length as usize] @@ -291,13 +317,14 @@ impl<'d, S: NorFlash> PeekIterator<'d, S> { } /// An iterator-like interface for peeking into data stored in flash. 
-struct QueueIterator<'d, S: NorFlash> { +struct QueueIterator<'d, S: NorFlash, CI: CacheImpl> { flash: &'d mut S, flash_range: Range, + cache: CI, current_address: CurrentAddress, } -impl<'d, S: NorFlash> Debug for QueueIterator<'d, S> { +impl<'d, S: NorFlash, CI: CacheImpl> Debug for QueueIterator<'d, S, CI> { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("QueueIterator") .field("current_address", &self.current_address) @@ -311,23 +338,32 @@ enum CurrentAddress { PageAfter(usize), } -impl<'d, S: NorFlash> QueueIterator<'d, S> { - async fn new(flash: &'d mut S, flash_range: Range) -> Result> { +impl<'d, S: NorFlash, CI: CacheImpl> QueueIterator<'d, S, CI> { + async fn new( + flash: &'d mut S, + flash_range: Range, + mut cache: CI, + ) -> Result> { assert_eq!(flash_range.start % S::ERASE_SIZE as u32, 0); assert_eq!(flash_range.end % S::ERASE_SIZE as u32, 0); assert!(S::ERASE_SIZE >= S::WORD_SIZE * 4); assert!(S::WORD_SIZE <= MAX_WORD_SIZE); + if cache.inner().is_dirty() { + cache.inner().invalidate_cache_state(); + } + // We start at the start of the oldest page let current_address = calculate_page_address::( flash_range.clone(), - find_oldest_page(flash, flash_range.clone()).await?, + find_oldest_page(flash, flash_range.clone(), cache.inner()).await?, ) + S::WORD_SIZE as u32; Ok(Self { flash, flash_range, + cache, current_address: CurrentAddress::Address(current_address), }) } @@ -338,17 +374,32 @@ impl<'d, S: NorFlash> QueueIterator<'d, S> { ) -> Result, u32)>, Error> { let mut data_buffer = Some(data_buffer); + if self.cache.inner().is_dirty() { + self.cache.inner().invalidate_cache_state(); + } + loop { // Get the current page and address based on what was stored let (current_page, current_address) = match self.current_address { CurrentAddress::PageAfter(previous_page) => { let next_page = next_page::(self.flash_range.clone(), previous_page); - if get_page_state(self.flash, self.flash_range.clone(), next_page) - .await? 
- .is_open() + if get_page_state( + self.flash, + self.flash_range.clone(), + self.cache.inner(), + next_page, + ) + .await? + .is_open() || next_page - == find_oldest_page(self.flash, self.flash_range.clone()).await? + == find_oldest_page( + self.flash, + self.flash_range.clone(), + self.cache.inner(), + ) + .await? { + self.cache.inner().unmark_dirty(); return Ok(None); } @@ -404,6 +455,7 @@ impl<'d, S: NorFlash> QueueIterator<'d, S> { CurrentAddress::Address(next_address) }; // Return the item we found + self.cache.inner().unmark_dirty(); return Ok(Some((item, found_item_address))); } } @@ -433,6 +485,7 @@ struct QueueIteratorResetPoint(CurrentAddress); pub async fn find_max_fit( flash: &mut S, flash_range: Range, + mut cache: impl CacheImpl, ) -> Result, Error> { assert_eq!(flash_range.start % S::ERASE_SIZE as u32, 0); assert_eq!(flash_range.end % S::ERASE_SIZE as u32, 0); @@ -440,17 +493,25 @@ pub async fn find_max_fit( assert!(S::ERASE_SIZE >= S::WORD_SIZE * 4); assert!(S::WORD_SIZE <= MAX_WORD_SIZE); - let current_page = find_youngest_page(flash, flash_range.clone()).await?; + let cache = cache.inner(); + + if cache.is_dirty() { + cache.invalidate_cache_state(); + } + + let current_page = find_youngest_page(flash, flash_range.clone(), cache).await?; // Check if we have space on the next page let next_page = next_page::(flash_range.clone(), current_page); - match get_page_state(flash, flash_range.clone(), next_page).await? { + match get_page_state(flash, flash_range.clone(), cache, next_page).await? { state @ PageState::Closed => { - if is_page_empty(flash, flash_range.clone(), next_page, Some(state)).await? { + if is_page_empty(flash, flash_range.clone(), cache, next_page, Some(state)).await? 
{ + cache.unmark_dirty(); return Ok(Some((S::ERASE_SIZE - (2 * S::WORD_SIZE)) as u32)); } } PageState::Open => { + cache.unmark_dirty(); return Ok(Some((S::ERASE_SIZE - (2 * S::WORD_SIZE)) as u32)); } PageState::PartialOpen => { @@ -473,6 +534,7 @@ pub async fn find_max_fit( .await? .1; + cache.unmark_dirty(); Ok(ItemHeader::available_data_bytes::( page_data_end_address - next_item_address, )) @@ -481,25 +543,23 @@ pub async fn find_max_fit( async fn find_youngest_page( flash: &mut S, flash_range: Range, + cache: &mut Cache, ) -> Result> { let last_used_page = - find_first_page(flash, flash_range.clone(), 0, PageState::PartialOpen).await?; + find_first_page(flash, flash_range.clone(), cache, 0, PageState::PartialOpen).await?; if let Some(last_used_page) = last_used_page { return Ok(last_used_page); } // We have no partial open page. Search for an open page to start in - let first_open_page = find_first_page(flash, flash_range, 0, PageState::Open).await?; + let first_open_page = find_first_page(flash, flash_range, cache, 0, PageState::Open).await?; if let Some(first_open_page) = first_open_page { return Ok(first_open_page); } // All pages are closed... This is not correct. 
- #[cfg(feature = "defmt")] - defmt::error!("Corrupted: All pages are closed"); - Err(Error::Corrupted { #[cfg(feature = "_test")] backtrace: std::backtrace::Backtrace::capture(), @@ -509,12 +569,13 @@ async fn find_youngest_page( async fn find_oldest_page( flash: &mut S, flash_range: Range, + cache: &mut Cache, ) -> Result> { - let youngest_page = find_youngest_page(flash, flash_range.clone()).await?; + let youngest_page = find_youngest_page(flash, flash_range.clone(), cache).await?; // The oldest page is the first non-open page after the youngest page let oldest_closed_page = - find_first_page(flash, flash_range, youngest_page, PageState::Closed).await?; + find_first_page(flash, flash_range, cache, youngest_page, PageState::Closed).await?; Ok(oldest_closed_page.unwrap_or(youngest_page)) } @@ -532,13 +593,18 @@ async fn find_oldest_page( pub async fn try_repair( flash: &mut S, flash_range: Range, + mut cache: impl CacheImpl, ) -> Result<(), Error> { + cache.inner().invalidate_cache_state(); + drop(cache); + crate::try_general_repair(flash, flash_range.clone()).await?; Ok(()) } #[cfg(test)] mod tests { + use crate::cache::PrivateCacheImpl; use crate::mock_flash::WriteCountCheck; use super::*; @@ -549,15 +615,20 @@ mod tests { #[test] async fn peek_and_overwrite_old_data() { - let mut flash = MockFlashTiny::new(WriteCountCheck::Twice, None); + let mut flash = MockFlashTiny::new(WriteCountCheck::Twice, None, true); let flash_range = 0x00..0x40; let mut data_buffer = AlignedBuf([0; 1024]); const DATA_SIZE: usize = 22; assert_eq!( - peek(&mut flash, flash_range.clone(), &mut data_buffer) - .await - .unwrap(), + peek( + &mut flash, + flash_range.clone(), + cache::NoCache::new(), + &mut data_buffer + ) + .await + .unwrap(), None ); @@ -565,32 +636,44 @@ mod tests { push( &mut flash, flash_range.clone(), + cache::NoCache::new(), &data_buffer[..DATA_SIZE], false, ) .await .unwrap(); assert_eq!( - &peek(&mut flash, flash_range.clone(), &mut data_buffer) - .await - .unwrap() 
- .unwrap()[..], + &peek( + &mut flash, + flash_range.clone(), + cache::NoCache::new(), + &mut data_buffer + ) + .await + .unwrap() + .unwrap()[..], &[0xAA; DATA_SIZE] ); data_buffer[..DATA_SIZE].copy_from_slice(&[0xBB; DATA_SIZE]); push( &mut flash, flash_range.clone(), + cache::NoCache::new(), &data_buffer[..DATA_SIZE], false, ) .await .unwrap(); assert_eq!( - &peek(&mut flash, flash_range.clone(), &mut data_buffer) - .await - .unwrap() - .unwrap()[..], + &peek( + &mut flash, + flash_range.clone(), + cache::NoCache::new(), + &mut data_buffer + ) + .await + .unwrap() + .unwrap()[..], &[0xAA; DATA_SIZE] ); @@ -599,6 +682,7 @@ mod tests { push( &mut flash, flash_range.clone(), + cache::NoCache::new(), &data_buffer[..DATA_SIZE], false, ) @@ -609,6 +693,7 @@ mod tests { push( &mut flash, flash_range.clone(), + cache::NoCache::new(), &data_buffer[..DATA_SIZE], true, ) @@ -616,52 +701,82 @@ mod tests { .unwrap(); assert_eq!( - &peek(&mut flash, flash_range.clone(), &mut data_buffer) - .await - .unwrap() - .unwrap()[..], + &peek( + &mut flash, + flash_range.clone(), + cache::NoCache::new(), + &mut data_buffer + ) + .await + .unwrap() + .unwrap()[..], &[0xBB; DATA_SIZE] ); assert_eq!( - &pop(&mut flash, flash_range.clone(), &mut data_buffer) - .await - .unwrap() - .unwrap()[..], + &pop( + &mut flash, + flash_range.clone(), + cache::NoCache::new(), + &mut data_buffer + ) + .await + .unwrap() + .unwrap()[..], &[0xBB; DATA_SIZE] ); assert_eq!( - &peek(&mut flash, flash_range.clone(), &mut data_buffer) - .await - .unwrap() - .unwrap()[..], + &peek( + &mut flash, + flash_range.clone(), + cache::NoCache::new(), + &mut data_buffer + ) + .await + .unwrap() + .unwrap()[..], &[0xDD; DATA_SIZE] ); assert_eq!( - &pop(&mut flash, flash_range.clone(), &mut data_buffer) - .await - .unwrap() - .unwrap()[..], + &pop( + &mut flash, + flash_range.clone(), + cache::NoCache::new(), + &mut data_buffer + ) + .await + .unwrap() + .unwrap()[..], &[0xDD; DATA_SIZE] ); assert_eq!( - peek(&mut 
flash, flash_range.clone(), &mut data_buffer) - .await - .unwrap(), + peek( + &mut flash, + flash_range.clone(), + cache::NoCache::new(), + &mut data_buffer + ) + .await + .unwrap(), None ); assert_eq!( - pop(&mut flash, flash_range.clone(), &mut data_buffer) - .await - .unwrap(), + pop( + &mut flash, + flash_range.clone(), + cache::NoCache::new(), + &mut data_buffer + ) + .await + .unwrap(), None ); } #[test] async fn push_pop() { - let mut flash = MockFlashBig::new(WriteCountCheck::Twice, None); + let mut flash = MockFlashBig::new(WriteCountCheck::Twice, None, true); let flash_range = 0x000..0x1000; let mut data_buffer = AlignedBuf([0; 1024]); @@ -669,29 +784,50 @@ mod tests { println!("{i}"); let data = vec![i as u8; i % 512 + 1]; - push(&mut flash, flash_range.clone(), &data, true) - .await - .unwrap(); + push( + &mut flash, + flash_range.clone(), + cache::NoCache::new(), + &data, + true, + ) + .await + .unwrap(); assert_eq!( - &peek(&mut flash, flash_range.clone(), &mut data_buffer) - .await - .unwrap() - .unwrap()[..], + &peek( + &mut flash, + flash_range.clone(), + cache::NoCache::new(), + &mut data_buffer + ) + .await + .unwrap() + .unwrap()[..], &data, "At {i}" ); assert_eq!( - &pop(&mut flash, flash_range.clone(), &mut data_buffer) - .await - .unwrap() - .unwrap()[..], + &pop( + &mut flash, + flash_range.clone(), + cache::NoCache::new(), + &mut data_buffer + ) + .await + .unwrap() + .unwrap()[..], &data, "At {i}" ); assert_eq!( - peek(&mut flash, flash_range.clone(), &mut data_buffer) - .await - .unwrap(), + peek( + &mut flash, + flash_range.clone(), + cache::NoCache::new(), + &mut data_buffer + ) + .await + .unwrap(), None, "At {i}" ); @@ -700,40 +836,61 @@ mod tests { #[test] async fn push_pop_tiny() { - let mut flash = MockFlashTiny::new(WriteCountCheck::Twice, None); + let mut flash = MockFlashTiny::new(WriteCountCheck::Twice, None, true); let flash_range = 0x00..0x40; - let mut data_buffer = [0; 1024]; + let mut data_buffer = AlignedBuf([0; 1024]); 
for i in 0..2000 { println!("{i}"); let data = vec![i as u8; i % 20 + 1]; println!("PUSH"); - push(&mut flash, flash_range.clone(), &data, true) - .await - .unwrap(); + push( + &mut flash, + flash_range.clone(), + cache::NoCache::new(), + &data, + true, + ) + .await + .unwrap(); assert_eq!( - &peek(&mut flash, flash_range.clone(), &mut data_buffer) - .await - .unwrap() - .unwrap()[..], + &peek( + &mut flash, + flash_range.clone(), + cache::NoCache::new(), + &mut data_buffer + ) + .await + .unwrap() + .unwrap()[..], &data, "At {i}" ); println!("POP"); assert_eq!( - &pop(&mut flash, flash_range.clone(), &mut data_buffer) - .await - .unwrap() - .unwrap()[..], + &pop( + &mut flash, + flash_range.clone(), + cache::NoCache::new(), + &mut data_buffer + ) + .await + .unwrap() + .unwrap()[..], &data, "At {i}" ); println!("PEEK"); assert_eq!( - peek(&mut flash, flash_range.clone(), &mut data_buffer) - .await - .unwrap(), + peek( + &mut flash, + flash_range.clone(), + cache::NoCache::new(), + &mut data_buffer + ) + .await + .unwrap(), None, "At {i}" ); @@ -744,26 +901,30 @@ mod tests { #[test] /// Same as [push_lots_then_pop_lots], except with added peeking and using the iterator style async fn push_peek_pop_many() { - let mut flash = MockFlashBig::new(WriteCountCheck::Twice, None); + let mut flash = MockFlashBig::new(WriteCountCheck::Twice, None, true); let flash_range = 0x000..0x1000; - let mut data_buffer = [0; 1024]; + let mut data_buffer = AlignedBuf([0; 1024]); let mut push_ops = (0, 0, 0, 0); let mut peek_ops = (0, 0, 0, 0); let mut pop_ops = (0, 0, 0, 0); + let mut cache = cache::NoCache::new(); + for loop_index in 0..100 { println!("Loop index: {loop_index}"); for i in 0..20 { let data = vec![i as u8; 50]; - push(&mut flash, flash_range.clone(), &data, false) + push(&mut flash, flash_range.clone(), &mut cache, &data, false) .await .unwrap(); add_ops(&mut flash, &mut push_ops); } - let mut peeker = peek_many(&mut flash, flash_range.clone()).await.unwrap(); + let mut 
peeker = peek_many(&mut flash, flash_range.clone(), &mut cache) + .await + .unwrap(); for i in 0..5 { let mut data = vec![i as u8; 50]; assert_eq!( @@ -774,7 +935,9 @@ mod tests { add_ops(peeker.iter.flash, &mut peek_ops); } - let mut popper = pop_many(&mut flash, flash_range.clone()).await.unwrap(); + let mut popper = pop_many(&mut flash, flash_range.clone(), &mut cache) + .await + .unwrap(); for i in 0..5 { let data = vec![i as u8; 50]; assert_eq!( @@ -787,13 +950,15 @@ mod tests { for i in 20..25 { let data = vec![i as u8; 50]; - push(&mut flash, flash_range.clone(), &data, false) + push(&mut flash, flash_range.clone(), &mut cache, &data, false) .await .unwrap(); add_ops(&mut flash, &mut push_ops); } - let mut peeker = peek_many(&mut flash, flash_range.clone()).await.unwrap(); + let mut peeker = peek_many(&mut flash, flash_range.clone(), &mut cache) + .await + .unwrap(); for i in 5..25 { let data = vec![i as u8; 50]; assert_eq!( @@ -804,7 +969,9 @@ mod tests { add_ops(peeker.iter.flash, &mut peek_ops); } - let mut popper = pop_many(&mut flash, flash_range.clone()).await.unwrap(); + let mut popper = pop_many(&mut flash, flash_range.clone(), &mut cache) + .await + .unwrap(); for i in 5..25 { let data = vec![i as u8; 50]; assert_eq!( @@ -828,9 +995,9 @@ mod tests { #[test] async fn push_lots_then_pop_lots() { - let mut flash = MockFlashBig::new(WriteCountCheck::Twice, None); + let mut flash = MockFlashBig::new(WriteCountCheck::Twice, None, true); let flash_range = 0x000..0x1000; - let mut data_buffer = [0; 1024]; + let mut data_buffer = AlignedBuf([0; 1024]); let mut push_ops = (0, 0, 0, 0); let mut pop_ops = (0, 0, 0, 0); @@ -840,19 +1007,30 @@ mod tests { for i in 0..20 { let data = vec![i as u8; 50]; - push(&mut flash, flash_range.clone(), &data, false) - .await - .unwrap(); + push( + &mut flash, + flash_range.clone(), + cache::NoCache::new(), + &data, + false, + ) + .await + .unwrap(); add_ops(&mut flash, &mut push_ops); } for i in 0..5 { let data = vec![i as 
u8; 50]; assert_eq!( - &pop(&mut flash, flash_range.clone(), &mut data_buffer) - .await - .unwrap() - .unwrap()[..], + &pop( + &mut flash, + flash_range.clone(), + cache::NoCache::new(), + &mut data_buffer + ) + .await + .unwrap() + .unwrap()[..], &data, "At {i}" ); @@ -861,19 +1039,30 @@ mod tests { for i in 20..25 { let data = vec![i as u8; 50]; - push(&mut flash, flash_range.clone(), &data, false) - .await - .unwrap(); + push( + &mut flash, + flash_range.clone(), + cache::NoCache::new(), + &data, + false, + ) + .await + .unwrap(); add_ops(&mut flash, &mut push_ops); } for i in 5..25 { let data = vec![i as u8; 50]; assert_eq!( - &pop(&mut flash, flash_range.clone(), &mut data_buffer) - .await - .unwrap() - .unwrap()[..], + &pop( + &mut flash, + flash_range.clone(), + cache::NoCache::new(), + &mut data_buffer + ) + .await + .unwrap() + .unwrap()[..], &data, "At {i}" ); @@ -923,54 +1112,87 @@ mod tests { #[test] async fn pop_with_empty_section() { - let mut flash = MockFlashTiny::new(WriteCountCheck::Twice, None); + let mut flash = MockFlashTiny::new(WriteCountCheck::Twice, None, true); let flash_range = 0x00..0x40; let mut data_buffer = AlignedBuf([0; 1024]); data_buffer[..20].copy_from_slice(&[0xAA; 20]); - push(&mut flash, flash_range.clone(), &data_buffer[0..20], false) - .await - .unwrap(); + push( + &mut flash, + flash_range.clone(), + cache::NoCache::new(), + &data_buffer[0..20], + false, + ) + .await + .unwrap(); data_buffer[..20].copy_from_slice(&[0xBB; 20]); - push(&mut flash, flash_range.clone(), &data_buffer[0..20], false) - .await - .unwrap(); + push( + &mut flash, + flash_range.clone(), + cache::NoCache::new(), + &data_buffer[0..20], + false, + ) + .await + .unwrap(); // There's now an unused gap at the end of the first page assert_eq!( - &pop(&mut flash, flash_range.clone(), &mut data_buffer) - .await - .unwrap() - .unwrap()[..], + &pop( + &mut flash, + flash_range.clone(), + cache::NoCache::new(), + &mut data_buffer + ) + .await + .unwrap() + 
.unwrap()[..], &[0xAA; 20] ); assert_eq!( - &pop(&mut flash, flash_range.clone(), &mut data_buffer) - .await - .unwrap() - .unwrap()[..], + &pop( + &mut flash, + flash_range.clone(), + cache::NoCache::new(), + &mut data_buffer + ) + .await + .unwrap() + .unwrap()[..], &[0xBB; 20] ); } #[test] async fn search_pages() { - let mut flash = MockFlashBig::new(WriteCountCheck::Twice, None); + let mut flash = MockFlashBig::new(WriteCountCheck::Twice, None, true); const FLASH_RANGE: Range = 0x000..0x1000; - close_page(&mut flash, FLASH_RANGE, 0).await.unwrap(); - close_page(&mut flash, FLASH_RANGE, 1).await.unwrap(); - partial_close_page(&mut flash, FLASH_RANGE, 2) + close_page(&mut flash, FLASH_RANGE, cache::NoCache::new().inner(), 0) + .await + .unwrap(); + close_page(&mut flash, FLASH_RANGE, cache::NoCache::new().inner(), 1) + .await + .unwrap(); + partial_close_page(&mut flash, FLASH_RANGE, cache::NoCache::new().inner(), 2) .await .unwrap(); assert_eq!( - find_youngest_page(&mut flash, FLASH_RANGE).await.unwrap(), + find_youngest_page(&mut flash, FLASH_RANGE, cache::NoCache::new().inner()) + .await + .unwrap(), 2 ); - assert_eq!(find_oldest_page(&mut flash, FLASH_RANGE).await.unwrap(), 0); + assert_eq!( + find_oldest_page(&mut flash, FLASH_RANGE, cache::NoCache::new().inner()) + .await + .unwrap(), + 0 + ); } }