From 303ce97a37060b03d93308d08c611c1574f78841 Mon Sep 17 00:00:00 2001 From: Paul Schoenfelder Date: Sun, 4 Aug 2024 06:43:56 -0400 Subject: [PATCH] test: no-std rwlock primitive, with documentation --- Cargo.lock | 97 ++++++++++++++++++++ Makefile | 4 + core/Cargo.toml | 8 ++ core/src/utils/sync.rs | 197 +++++++++++++++++++++++++++++++++++++++-- 4 files changed, 298 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5788b395c6..80b44aed47 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -679,6 +679,19 @@ dependencies = [ "pin-utils", ] +[[package]] +name = "generator" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "979f00864edc7516466d6b3157706e06c032f22715700ddd878228a91d02bc56" +dependencies = [ + "cfg-if", + "libc", + "log", + "rustversion", + "windows", +] + [[package]] name = "generic-array" version = "0.14.7" @@ -926,6 +939,19 @@ dependencies = [ "log", ] +[[package]] +name = "loom" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca" +dependencies = [ + "cfg-if", + "generator", + "scoped-tls", + "tracing", + "tracing-subscriber", +] + [[package]] name = "malloc_buf" version = "0.0.6" @@ -1000,6 +1026,7 @@ name = "miden-core" version = "0.10.0" dependencies = [ "lock_api", + "loom", "memchr", "miden-crypto", "miden-formatting", @@ -1768,6 +1795,12 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "scoped-tls" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" + [[package]] name = "scopeguard" version = "1.2.0" @@ -2417,6 +2450,70 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6" +dependencies = [ + "windows-core", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-core" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-result", + "windows-strings", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-implement" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "windows-interface" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "windows-result" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-strings" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +dependencies = [ + "windows-result", 
+ "windows-targets 0.52.6", +] + [[package]] name = "windows-sys" version = "0.48.0" diff --git a/Makefile b/Makefile index 7cb2d25a61..e46a8f8c74 100644 --- a/Makefile +++ b/Makefile @@ -61,6 +61,10 @@ test-fast: ## Runs all tests with the debug profile test-skip-proptests: ## Runs all tests, except property-based tests $(DEBUG_ASSERTIONS) cargo nextest run --features testing -E 'not test(#*proptest)' +.PHONY: test-loom +test-loom: ## Runs all loom-based tests + RUSTFLAGS="--cfg loom" cargo nextest run --cargo-profile test-release --features testing -E 'test(#*loom)' + # --- checking ------------------------------------------------------------------------------------ .PHONY: check diff --git a/core/Cargo.toml b/core/Cargo.toml index b812725752..90ffa79991 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -49,3 +49,11 @@ winter-utils = { package = "winter-utils", version = "0.9", default-features = f [dev-dependencies] proptest = "1.3" rand_utils = { version = "0.9", package = "winter-rand-utils" } +loom = "0.7" + +[target.'cfg(loom)'.dependencies] +loom = "0.7" + +# Enable once the VM reaches Rust 1.80+ +#[lints.rust] +#unexpected_cfgs = { level = "warn", check-cfg = ['cfg(loom)'] } diff --git a/core/src/utils/sync.rs b/core/src/utils/sync.rs index 2610e4855f..0c96ff9586 100644 --- a/core/src/utils/sync.rs +++ b/core/src/utils/sync.rs @@ -1,37 +1,114 @@ #[cfg(feature = "std")] -pub use parking_lot::RwLock; +pub use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard}; #[cfg(not(feature = "std"))] -pub use self::no_std::RwLock; +pub use self::rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard}; -#[cfg(not(feature = "std"))] -mod no_std { - use core::sync::atomic::{AtomicUsize, Ordering}; +pub mod rwlock { + #[cfg(not(loom))] + use core::{ + hint, + sync::atomic::{AtomicUsize, Ordering}, + }; + + #[cfg(loom)] + use loom::{ + hint, + sync::atomic::{AtomicUsize, Ordering}, + }; use lock_api::RawRwLock; + /// An implementation of a reader-writer lock, based on a spinlock primitive, no-std compatible + /// + /// See [lock_api::RwLock] for usage. pub type RwLock = lock_api::RwLock; + /// See [lock_api::RwLockReadGuard] for usage. + pub type RwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, Spinlock, T>; + + /// See [lock_api::RwLockWriteGuard] for usage. + pub type RwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, Spinlock, T>; + + /// The underlying raw reader-writer primitive that implements [lock_api::RawRwLock] + /// + /// This is fundamentally a spinlock, in that blocking operations on the lock will spin until + /// they succeed in acquiring/releasing the lock. + /// + /// To acheive the ability to share the underlying data with multiple readers, or hold + /// exclusive access for one writer, the lock state is based on a "locked" count, where shared + /// access increments the count by an even number, and acquiring exclusive access relies on the + /// use of the lowest order bit to stop further shared acquisition, and indicate that the lock + /// is exclusively held (the difference between the two is irrelevant from the perspective of + /// a thread attempting to acquire the lock, but internally the state uses `usize::MAX` as the + /// "exlusively locked" sentinel). 
+    ///
+    /// This mechanism gets us the following:
+    ///
+    /// * Whether the lock has been acquired (shared or exclusive)
+    /// * Whether the lock is being exclusively acquired
+    /// * How many times the lock has been acquired
+    /// * Whether the acquisition(s) are exclusive or shared
+    ///
+    /// Further implementation details, such as how we manage draining readers once an attempt to
+    /// exclusively acquire the lock occurs, are described below.
+    ///
+    /// NOTE: This is a simple implementation, meant for use in no-std environments; there are much
+    /// more robust/performant implementations available when OS primitives can be used.
     pub struct Spinlock {
+        /// The state of the lock, primarily representing the acquisition count, but relying on
+        /// the distinction between even and odd values to indicate whether or not exclusive access
+        /// is being acquired.
         state: AtomicUsize,
+        /// A counter used to wake a parked writer once the last shared lock is released during
+        /// acquisition of an exclusive lock. The actual count is not itself important, and it
+        /// simply wraps around on overflow; what matters is that when the value changes, the
+        /// parked writer will wake and resume attempting to acquire the exclusive lock.
         writer_wake_counter: AtomicUsize,
     }
 
+    impl Default for Spinlock {
+        #[inline(always)]
+        fn default() -> Self {
+            Self::new()
+        }
+    }
+
     impl Spinlock {
-        const fn new() -> Self {
+        #[cfg(not(loom))]
+        pub const fn new() -> Self {
+            Self {
+                state: AtomicUsize::new(0),
+                writer_wake_counter: AtomicUsize::new(0),
+            }
+        }
+
+        #[cfg(loom)]
+        pub fn new() -> Self {
             Self {
                 state: AtomicUsize::new(0),
                 writer_wake_counter: AtomicUsize::new(0),
             }
         }
     }
+
     unsafe impl RawRwLock for Spinlock {
+        #[cfg(loom)]
+        const INIT: Spinlock = unimplemented!();
+
+        #[cfg(not(loom))]
+        // This is intentional on the part of the [RawRwLock] API, basically a hack to provide
+        // initial values as static items.
+        #[allow(clippy::declare_interior_mutable_const)]
         const INIT: Spinlock = Spinlock::new();
 
         type GuardMarker = lock_api::GuardSend;
 
+        /// The operation invoked when calling `RwLock::read`; blocks the caller until acquired
         fn lock_shared(&self) {
             let mut s = self.state.load(Ordering::Relaxed);
             loop {
+                // If the exclusive bit is unset, attempt to acquire a read lock
                 if s & 1 == 0 {
                     match self.state.compare_exchange_weak(
                         s,
@@ -40,14 +117,18 @@
                         Ordering::Relaxed,
                     ) {
                         Ok(_) => return,
+                        // Someone else beat us to the punch and acquired a lock
                         Err(e) => s = e,
                     }
                 }
+                // If an exclusive lock is held or being acquired, spin until the lock state
+                // changes, at which point we try to acquire the lock again
                 if s & 1 == 1 {
                     loop {
                         let next = self.state.load(Ordering::Relaxed);
                         if s == next {
-                            core::hint::spin_loop();
+                            hint::spin_loop();
+                            continue;
                         } else {
                             s = next;
                             break;
@@ -56,6 +137,9 @@ mod no_std {
                 }
             }
         }
+
+        /// The operation invoked when calling `RwLock::try_read`; returns whether or not the
+        /// lock was acquired
         fn try_lock_shared(&self) -> bool {
             let s = self.state.load(Ordering::Relaxed);
             if s & 1 == 0 {
@@ -67,15 +151,23 @@ mod no_std {
             }
         }
 
+        /// The operation invoked when dropping a `RwLockReadGuard`
         unsafe fn unlock_shared(&self) {
             if self.state.fetch_sub(2, Ordering::Release) == 3 {
+                // The lock is being exclusively acquired, and we're the last shared acquisition
+                // to be released, so wake the writer by incrementing the wake counter
                 self.writer_wake_counter.fetch_add(1, Ordering::Release);
             }
         }
 
+        /// The operation invoked when calling `RwLock::write`; blocks the caller until acquired
         fn lock_exclusive(&self) {
             let mut s = self.state.load(Ordering::Relaxed);
             loop {
+                // Attempt to acquire the lock immediately, or complete acquisition of the lock
+                // if we're continuing the loop after acquiring the exclusive bit. If another
+                // thread acquired it first, we race to be the first thread to acquire it once
+                // it is released, by busy-looping here.
                 if s <= 1 {
                     match self.state.compare_exchange(
                         s,
@@ -86,32 +178,47 @@ mod no_std {
                         Ok(_) => return,
                         Err(e) => {
                             s = e;
+                            hint::spin_loop();
                             continue;
                         }
                     }
                 }
 
+                // Only shared locks have been acquired; attempt to acquire the exclusive bit,
+                // which will prevent further shared locks from being acquired. It does not,
+                // in and of itself, grant us exclusive access, however.
                 if s & 1 == 0 {
                     if let Err(e) =
                         self.state.compare_exchange(s, s + 1, Ordering::Relaxed, Ordering::Relaxed)
                     {
+                        // The lock state changed before we could acquire the exclusive bit, so
+                        // update our view of the lock state and try again
                         s = e;
                         continue;
                     }
                 }
 
+                // We've acquired the exclusive bit; now we need to busy wait until all shared
+                // acquisitions are released.
                 let w = self.writer_wake_counter.load(Ordering::Acquire);
                 s = self.state.load(Ordering::Relaxed);
 
+                // "Park" the thread here (by busy looping) until the release of the last shared
+                // lock, which is communicated to us by it incrementing the wake counter.
                 if s >= 2 {
                     while self.writer_wake_counter.load(Ordering::Acquire) == w {
-                        core::hint::spin_loop();
+                        hint::spin_loop();
                     }
                     s = self.state.load(Ordering::Relaxed);
                 }
+
+                // All shared locks have been released, so go back to the top and try to complete
+                // acquisition of exclusive access.
             }
         }
 
+        /// The operation invoked when calling `RwLock::try_write`; returns whether or not the
+        /// lock was acquired
         fn try_lock_exclusive(&self) -> bool {
             let s = self.state.load(Ordering::Relaxed);
             if s <= 1 {
@@ -123,9 +230,83 @@
             }
         }
 
+        /// The operation invoked when dropping a `RwLockWriteGuard`
         unsafe fn unlock_exclusive(&self) {
+            // Infallible, as we hold an exclusive lock
+            //
+            // Note the use of `Release` ordering here, which ensures any loads of the lock state
+            // by other threads are ordered after this store.
             self.state.store(0, Ordering::Release);
+            // This increment also wakes any writer that parked while waiting for the lock to be
+            // released (see `lock_exclusive`). Its `Release` ordering pairs with the `Acquire`
+            // loads of this counter performed by waiting writers, ensuring that a writer which
+            // observes the new value also observes the store to `state` above when it retries
+            // acquisition.
             self.writer_wake_counter.fetch_add(1, Ordering::Release);
         }
     }
 }
+
+#[cfg(all(loom, test))]
+mod test {
+    use super::rwlock::{RwLock, Spinlock};
+    use alloc::vec::Vec;
+    use loom::{model::Builder, sync::Arc};
+
+    #[test]
+    fn test_rwlock_loom() {
+        let mut builder = Builder::default();
+        builder.max_duration = Some(std::time::Duration::from_secs(60));
+        builder.log = true;
+        builder.check(|| {
+            let raw_rwlock = Spinlock::new();
+            let n = Arc::new(RwLock::from_raw(raw_rwlock, 0usize));
+            let mut readers = Vec::new();
+            let mut writers = Vec::new();
+
+            let num_readers = 2;
+            let num_writers = 2;
+            let num_iterations = 2;
+
+            // Readers should never observe a non-zero value
+            for _ in 0..num_readers {
+                let n0 = n.clone();
+                let t = loom::thread::spawn(move || {
+                    for _ in 0..num_iterations {
+                        let guard = n0.read();
+                        assert_eq!(*guard, 0);
+                    }
+                });
+
+                readers.push(t);
+            }
+
+            // Writers should always observe a zero value upon acquiring the
+            // lock, and should never observe a value > 1 while holding it,
+            // since their access is exclusive
+            for _ in 0..num_writers {
+                let n0 = n.clone();
+                let t = loom::thread::spawn(move || {
+                    for _ in 0..num_iterations {
+                        let mut guard = n0.write();
+                        assert_eq!(*guard, 0);
+                        *guard += 1;
+                        assert_eq!(*guard, 1);
+                        *guard -= 1;
+                        assert_eq!(*guard, 0);
+                    }
+                });
+
+                writers.push(t);
+            }
+
+            for t in readers {
+                t.join().unwrap();
+            }
+
+            for t in writers {
+                t.join().unwrap();
+            }
+        })
+    }
+}
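
Usage note (not part of the patch): the sketch below illustrates how the re-exported lock is expected to be used from other code in the crate, assuming the `crate::utils::sync` path implied by the re-exports above; the `example` function and the `cache` variable are illustrative only. Under `--cfg loom`, `Spinlock::INIT` is unavailable, so locks must be built with `RwLock::from_raw(Spinlock::new(), ...)` as in the test module.

    // Hypothetical usage sketch for the lock introduced in this patch.
    use crate::utils::sync::RwLock;

    fn example() {
        // Construct a lock around some shared state. Outside of loom builds this goes
        // through the const `Spinlock::INIT`, so the lock can also back a `static`.
        let cache: RwLock<Option<u32>> = RwLock::new(None);

        {
            // `write` spins in `lock_exclusive` until exclusive access is granted.
            let mut guard = cache.write();
            *guard = Some(42);
        } // dropping the guard invokes `unlock_exclusive`

        // Any number of `read` guards may be held concurrently; each acquisition bumps
        // the shared count by 2 (`lock_shared`) and releases it on drop (`unlock_shared`).
        let guard = cache.read();
        assert_eq!(*guard, Some(42));
    }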