diff --git a/esp-hal/src/lib.rs b/esp-hal/src/lib.rs
index 5dcae8029a3..2574524a80f 100644
--- a/esp-hal/src/lib.rs
+++ b/esp-hal/src/lib.rs
@@ -237,7 +237,9 @@ pub mod uart;
 pub mod usb_serial_jtag;
 
 pub mod debugger;
-mod lock;
+
+#[doc(hidden)]
+pub mod sync;
 
 /// State of the CPU saved when entering exception or interrupt
 pub mod trapframe {
diff --git a/esp-hal/src/soc/mod.rs b/esp-hal/src/soc/mod.rs
index a24f409bed2..b0cf18de82f 100644
--- a/esp-hal/src/soc/mod.rs
+++ b/esp-hal/src/soc/mod.rs
@@ -2,7 +2,7 @@ use portable_atomic::{AtomicU8, Ordering};
 
 pub use self::implementation::*;
 #[cfg(psram)]
-use crate::lock::Locked;
+use crate::sync::Locked;
 
 #[cfg_attr(esp32, path = "esp32/mod.rs")]
 #[cfg_attr(esp32c2, path = "esp32c2/mod.rs")]
diff --git a/esp-hal/src/lock.rs b/esp-hal/src/sync.rs
similarity index 64%
rename from esp-hal/src/lock.rs
rename to esp-hal/src/sync.rs
index 932dee32b8a..0b50edfba4d 100644
--- a/esp-hal/src/lock.rs
+++ b/esp-hal/src/sync.rs
@@ -1,23 +1,38 @@
+//! Under construction: this is public only for tests; please avoid using it.
+
+#[cfg(single_core)]
+use core::cell::Cell;
 use core::cell::UnsafeCell;
 
 mod single_core {
+    use core::sync::atomic::{compiler_fence, Ordering};
+
     pub unsafe fn disable_interrupts() -> critical_section::RawRestoreState {
         cfg_if::cfg_if! {
             if #[cfg(riscv)] {
                 let mut mstatus = 0u32;
                 core::arch::asm!("csrrci {0}, mstatus, 8", inout(reg) mstatus);
-                ((mstatus & 0b1000) != 0) as critical_section::RawRestoreState
+                let token = ((mstatus & 0b1000) != 0) as critical_section::RawRestoreState;
             } else if #[cfg(xtensa)] {
                 let token: critical_section::RawRestoreState;
                 core::arch::asm!("rsil {0}, 5", out(reg) token);
-                token
             } else {
                 compile_error!("Unsupported architecture")
             }
-        }
+        };
+
+        // Ensure no subsequent memory accesses are reordered to before interrupts are
+        // disabled.
+        compiler_fence(Ordering::SeqCst);
+
+        token
     }
 
     pub unsafe fn reenable_interrupts(token: critical_section::RawRestoreState) {
+        // Ensure no preceding memory accesses are reordered to after interrupts are
+        // enabled.
+        compiler_fence(Ordering::SeqCst);
+
         cfg_if::cfg_if! {
             if #[cfg(riscv)] {
                 if token != 0 {
@@ -41,21 +56,6 @@ mod single_core {
 mod multicore {
     use portable_atomic::{AtomicUsize, Ordering};
 
-    cfg_if::cfg_if! {
-        if #[cfg(riscv)] {
-            // The restore state is a u8 that is casted from a bool, so it has a value of
-            // 0x00 or 0x01 before we add the reentry flag to it.
-            pub const REENTRY_FLAG: u8 = 1 << 7;
-        } else if #[cfg(xtensa)] {
-            // PS has 15 useful bits. Bits 12..16 and 19..32 are unused, so we can use bit
-            // #31 as our reentry flag.
-            // We can assume the reserved bit is 0 otherwise rsil - wsr pairings would be
-            // undefined behavior: Quoting the ISA summary, table 64:
-            // Writing a non-zero value to these fields results in undefined processor behavior.
-            pub const REENTRY_FLAG: u32 = 1 << 31;
-        }
-    }
-
     // Safety: Ensure that when adding new chips `get_raw_core` doesn't return this
     // value.
     // FIXME: ensure in HIL tests this is the case!
@@ -106,23 +106,65 @@
     }
 }
 
-pub(crate) struct Lock {
+cfg_if::cfg_if! {
+    if #[cfg(riscv)] {
+        // The restore state is a u8 that is cast from a bool, so it has a value of
+        // 0x00 or 0x01 before we add the reentry flag to it.
+        pub const REENTRY_FLAG: u8 = 1 << 7;
+    } else if #[cfg(xtensa)] {
+        // PS has 15 useful bits. Bits 12..16 and 19..32 are unused, so we can use bit
+        // #31 as our reentry flag.
+        // We can assume the reserved bit is 0, otherwise rsil - wsr pairings would be
+        // undefined behavior. Quoting the ISA summary, table 64:
+        // Writing a non-zero value to these fields results in undefined processor behavior.
+        pub const REENTRY_FLAG: u32 = 1 << 31;
+    }
+}
+
+/// A lock that can be used to protect shared resources.
+pub struct Lock {
     #[cfg(multi_core)]
     inner: multicore::AtomicLock,
+    #[cfg(single_core)]
+    is_locked: Cell<bool>,
+}
+
+unsafe impl Sync for Lock {}
+
+impl Default for Lock {
+    fn default() -> Self {
+        Self::new()
+    }
 }
 
 impl Lock {
+    /// Create a new lock.
     pub const fn new() -> Self {
         Self {
             #[cfg(multi_core)]
             inner: multicore::AtomicLock::new(),
+            #[cfg(single_core)]
+            is_locked: Cell::new(false),
         }
     }
 
-    fn acquire(&self) -> critical_section::RawRestoreState {
+    /// Acquires the lock.
+    ///
+    /// # Safety
+    ///
+    /// - Each release call must be paired with an acquire call.
+    /// - The returned token must be passed to the corresponding `release` call.
+    /// - The caller must release the locks in the reverse order they were
+    ///   acquired.
+    pub unsafe fn acquire(&self) -> critical_section::RawRestoreState {
        cfg_if::cfg_if! {
             if #[cfg(single_core)] {
-                unsafe { single_core::disable_interrupts() }
+                let mut tkn = unsafe { single_core::disable_interrupts() };
+                let was_locked = self.is_locked.replace(true);
+                if was_locked {
+                    tkn |= REENTRY_FLAG;
+                }
+                tkn
             } else if #[cfg(multi_core)] {
                 // We acquire the lock inside an interrupt-free context to prevent a subtle
                 // race condition:
@@ -138,7 +180,7 @@ impl Lock {
                 match self.inner.try_lock(current_thread_id) {
                     Ok(()) => Some(tkn),
                     Err(owner) if owner == current_thread_id => {
-                        tkn |= multicore::REENTRY_FLAG;
+                        tkn |= REENTRY_FLAG;
                         Some(tkn)
                     }
                     Err(_) => {
@@ -158,27 +200,31 @@ impl Lock {
         }
     }
 
+    /// Releases the lock.
+    ///
     /// # Safety
-    /// This function must only be called if the lock was acquired by the
-    /// current thread.
-    unsafe fn release(&self, token: critical_section::RawRestoreState) {
-        #[cfg(multi_core)]
-        {
-            if token & multicore::REENTRY_FLAG != 0 {
-                return;
-            }
-
+    ///
+    /// - This function must only be called if the lock was acquired by the
+    ///   current thread.
+    /// - The caller must release the locks in the reverse order they were
+    ///   acquired.
+    /// - Each release call must be paired with an acquire call.
+    pub unsafe fn release(&self, token: critical_section::RawRestoreState) {
+        if token & REENTRY_FLAG == 0 {
+            #[cfg(multi_core)]
             self.inner.unlock();
-        }
 
-        single_core::reenable_interrupts(token);
+            #[cfg(single_core)]
+            self.is_locked.set(false);
+
+            single_core::reenable_interrupts(token);
+        }
     }
 }
 
 // This is preferred over critical-section as this allows you to have multiple
 // locks active at the same time rather than using the global mutex that is
 // critical-section.
-#[allow(unused_variables)]
 pub(crate) fn lock<T>(lock: &Lock, f: impl FnOnce() -> T) -> T {
     // In regards to disabling interrupts, we only need to disable
     // the interrupts that may be calling this function.
@@ -190,13 +236,13 @@ pub(crate) fn lock<T>(lock: &Lock, f: impl FnOnce() -> T) -> T {
 
     impl<'a> LockGuard<'a> {
         fn new(lock: &'a Lock) -> Self {
-            let token = lock.acquire();
-
-            #[cfg(multi_core)]
-            assert!(
-                token & multicore::REENTRY_FLAG == 0,
-                "lock is not reentrant"
-            );
+            let token = unsafe {
+                // SAFETY: the same lock will be released when dropping the guard.
+                // This ensures that the lock is released on the same thread, in the reverse
+                // order it was acquired.
+                lock.acquire()
+            };
+            assert!(token & REENTRY_FLAG == 0, "lock is not reentrant");
 
             Self { lock, token }
         }
@@ -212,25 +258,26 @@ pub(crate) fn lock<T>(lock: &Lock, f: impl FnOnce() -> T) -> T {
     f()
 }
 
-/// Data protected by a [Lock]
-#[allow(unused)]
-pub(crate) struct Locked<T> {
+/// Data protected by a [Lock].
+///
+/// This is largely equivalent to a `Mutex<RefCell<T>>`, but accessing the inner
+/// data doesn't hold a critical section on multi-core systems.
+pub struct Locked<T> {
     lock_state: Lock,
     data: UnsafeCell<T>,
 }
 
-#[allow(unused)]
 impl<T> Locked<T> {
     /// Create a new instance
-    pub(crate) const fn new(data: T) -> Self {
+    pub const fn new(data: T) -> Self {
         Self {
             lock_state: Lock::new(),
             data: UnsafeCell::new(data),
         }
     }
 
-    /// Provide exclusive access to the protected data to the given closure
-    pub(crate) fn with<R>(&self, f: impl FnOnce(&mut T) -> R) -> R {
+    /// Provide exclusive access to the protected data to the given closure.
+    pub fn with<R>(&self, f: impl FnOnce(&mut T) -> R) -> R {
         lock(&self.lock_state, || f(unsafe { &mut *self.data.get() }))
     }
 }
diff --git a/esp-hal/src/timer/systimer.rs b/esp-hal/src/timer/systimer.rs
index 42b5a8e9539..0d1ae5e94ea 100644
--- a/esp-hal/src/timer/systimer.rs
+++ b/esp-hal/src/timer/systimer.rs
@@ -79,9 +79,9 @@ use fugit::{Instant, MicrosDurationU32, MicrosDurationU64};
 use super::{Error, Timer as _};
 use crate::{
     interrupt::{self, InterruptHandler},
-    lock::{lock, Lock},
     peripheral::Peripheral,
     peripherals::{Interrupt, SYSTIMER},
+    sync::{lock, Lock},
     system::{Peripheral as PeripheralEnable, PeripheralClockControl},
     Async,
     Blocking,
diff --git a/esp-hal/src/timer/timg.rs b/esp-hal/src/timer/timg.rs
index 965d51b1984..19397971d1a 100644
--- a/esp-hal/src/timer/timg.rs
+++ b/esp-hal/src/timer/timg.rs
@@ -76,10 +76,10 @@ use crate::soc::constants::TIMG_DEFAULT_CLK_SRC;
 use crate::{
     clock::Clocks,
     interrupt::{self, InterruptHandler},
-    lock::{lock, Lock},
     peripheral::{Peripheral, PeripheralRef},
     peripherals::{timg0::RegisterBlock, Interrupt, TIMG0},
     private::Sealed,
+    sync::{lock, Lock},
     system::{Peripheral as PeripheralEnable, PeripheralClockControl},
     Async,
     Blocking,
diff --git a/hil-test/Cargo.toml b/hil-test/Cargo.toml
index 8c975393c81..4dd400b47da 100644
--- a/hil-test/Cargo.toml
+++ b/hil-test/Cargo.toml
@@ -23,6 +23,10 @@ harness = false
 name = "crc"
 harness = false
 
+[[test]]
+name = "critical_section"
+harness = false
+
 [[test]]
 name = "delay"
 harness = false
diff --git a/hil-test/tests/critical_section.rs b/hil-test/tests/critical_section.rs
new file mode 100644
index 00000000000..00bbf6fc135
--- /dev/null
+++ b/hil-test/tests/critical_section.rs
@@ -0,0 +1,53 @@
+//! Ensure invariants of locks are upheld.
+
+//% CHIPS: esp32 esp32c2 esp32c3 esp32c6 esp32h2 esp32s2 esp32s3
+
+// TODO: add multi-core tests
+
+#![no_std]
+#![no_main]
+
+use hil_test as _;
+
+#[cfg(test)]
+#[embedded_test::tests]
+mod tests {
+    use esp_hal::sync::Locked;
+
+    #[test]
+    fn critical_section_is_reentrant() {
+        let mut flag = false;
+
+        critical_section::with(|_| {
+            critical_section::with(|_| {
+                flag = true;
+            });
+        });
+
+        assert!(flag);
+    }
+
+    #[test]
+    fn locked_can_provide_mutable_access() {
+        let flag = Locked::new(false);
+
+        flag.with(|f| {
+            *f = true;
+        });
+        flag.with(|f| {
+            assert!(*f);
+        });
+    }
+
+    #[test]
+    #[should_panic]
+    fn locked_is_not_reentrant() {
+        let flag = Locked::new(false);
+
+        flag.with(|_f| {
+            flag.with(|f| {
+                *f = true;
+            });
+        });
+    }
+}
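
For context, a minimal sketch of how the newly public `esp_hal::sync::Locked` wrapper is used, in the style of the tests above. The `INTERRUPT_COUNT` static and `on_interrupt` function are illustrative names, not part of this diff; the module itself is `#[doc(hidden)]` and documented as under construction, so code outside the HAL and its tests should still prefer `critical-section`.

    use esp_hal::sync::Locked;

    // Each `Locked` value owns its own `Lock`, so two independent `Locked`
    // statics do not contend with each other the way two users of the single
    // global critical-section mutex would.
    static INTERRUPT_COUNT: Locked<u32> = Locked::new(0);

    fn on_interrupt() {
        // `with` acquires the inner lock (disabling interrupts on single-core
        // chips), hands the closure a `&mut u32`, and releases the lock when
        // the closure returns. Re-entering `with` on the same `Locked` panics,
        // as the `locked_is_not_reentrant` test demonstrates.
        INTERRUPT_COUNT.with(|count| *count += 1);
    }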