diff --git a/rust-toolchain b/rust-toolchain
index df484cbb1d..80627411dc 100644
--- a/rust-toolchain
+++ b/rust-toolchain
@@ -1 +1 @@
-1.71.0
+1.74.1
diff --git a/src/lock/api.rs b/src/lock/api.rs
index 31a3d894e7..06cf1516fb 100644
--- a/src/lock/api.rs
+++ b/src/lock/api.rs
@@ -26,7 +26,7 @@ pub trait RawTryLock: RawLock {
 
 /// A type-safe lock.
 #[repr(C)]
-#[derive(Debug)]
+#[derive(Debug, Default)]
 pub struct Lock<L: RawLock, T> {
     lock: L,
     data: UnsafeCell<T>,
@@ -111,17 +111,17 @@ pub struct LockGuard<'s, L: RawLock, T> {
     token: ManuallyDrop<L::Token>,
 }
 
-unsafe impl<'s, L: RawLock, T: Send> Send for LockGuard<'s, L, T> {}
-unsafe impl<'s, L: RawLock, T: Sync> Sync for LockGuard<'s, L, T> {}
+unsafe impl<L: RawLock, T: Send> Send for LockGuard<'_, L, T> {}
+unsafe impl<L: RawLock, T: Sync> Sync for LockGuard<'_, L, T> {}
 
-impl<'s, L: RawLock, T> LockGuard<'s, L, T> {
+impl<L: RawLock, T> LockGuard<'_, L, T> {
     /// Returns the address of the referenced lock.
     pub fn raw(&mut self) -> usize {
         self.lock as *const _ as usize
     }
 }
 
-impl<'s, L: RawLock, T> Drop for LockGuard<'s, L, T> {
+impl<L: RawLock, T> Drop for LockGuard<'_, L, T> {
     fn drop(&mut self) {
         // SAFETY: `self.token` is not used anymore in this function, and as we are `drop`ing
         // `self`, it is not used anymore.
@@ -133,7 +133,7 @@ impl<'s, L: RawLock, T> Drop for LockGuard<'s, L, T> {
     }
 }
 
-impl<'s, L: RawLock, T> Deref for LockGuard<'s, L, T> {
+impl<L: RawLock, T> Deref for LockGuard<'_, L, T> {
     type Target = T;
 
     fn deref(&self) -> &Self::Target {
@@ -142,7 +142,7 @@ impl<'s, L: RawLock, T> Deref for LockGuard<'s, L, T> {
     }
 }
 
-impl<'s, L: RawLock, T> DerefMut for LockGuard<'s, L, T> {
+impl<L: RawLock, T> DerefMut for LockGuard<'_, L, T> {
     fn deref_mut(&mut self) -> &mut Self::Target {
         // SAFETY: Having a `LockGuard` means the underlying lock is held.
         //
@@ -151,7 +151,7 @@ impl<'s, L: RawLock, T> DerefMut for LockGuard<'s, L, T> {
     }
 }
 
-impl<'s, L: RawLock, T> LockGuard<'s, L, T> {
+impl<L: RawLock, T> LockGuard<'_, L, T> {
     /// Transforms a lock guard to an address.
     pub fn into_raw(self) -> usize {
         let ret = self.lock as *const _ as usize;
@@ -180,7 +180,7 @@ pub mod tests {
     pub fn smoke<L: RawLock + Default>() {
         const LENGTH: usize = 1024;
 
-        let d = Lock::<L, Vec<usize>>::new(vec![]);
+        let d = Lock::<L, Vec<usize>>::default();
 
         scope(|s| {
             for i in 1..LENGTH {
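
Usage sketch (not part of the patch): the new `Default` derive on `Lock` is what lets the smoke test build the lock with `default()` instead of `new(vec![])`. The illustration below assumes `SpinLock: Default` and the module paths suggested by this diff (`crate::lock::api::Lock`, `crate::lock::spinlock::SpinLock`); it relies only on `lock()`, `Deref`/`DerefMut`, and the guard's drop behavior shown above.

    use crate::lock::api::Lock;
    use crate::lock::spinlock::SpinLock;

    fn demo() {
        // `#[derive(Default)]` on `Lock<L, T>` requires both `L: Default` and `T: Default`.
        let counter = Lock::<SpinLock, u64>::default();
        {
            // `lock()` returns a `LockGuard`; `DerefMut` gives mutable access to the data.
            let mut guard = counter.lock();
            *guard += 1;
        } // Dropping the guard releases the underlying `RawLock`.
    }
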
diff --git a/src/lock/clhlock.rs b/src/lock/clhlock.rs
index 9f64cf235a..4820649c10 100644
--- a/src/lock/clhlock.rs
+++ b/src/lock/clhlock.rs
@@ -1,4 +1,4 @@
-use core::sync::atomic::{AtomicBool, AtomicPtr, Ordering};
+use core::sync::atomic::{AtomicBool, AtomicPtr, Ordering::*};
 
 use crossbeam_utils::{Backoff, CachePadded};
 
@@ -38,13 +38,13 @@ impl RawLock for ClhLock {
     fn lock(&self) -> Self::Token {
         let node = Box::into_raw(Box::new(CachePadded::new(Node::new(true))));
-        let prev = self.tail.swap(node, Ordering::AcqRel);
+        let prev = self.tail.swap(node, AcqRel);
         let backoff = Backoff::new();
 
         // SAFETY: `prev` is valid, as `self.tail` was valid at initialization and any `swap()` to
         // it by other `lock()`s. Hence, it points to valid memory as the thread that made `prev`
         // will not free it.
-        while unsafe { (*prev).locked.load(Ordering::Acquire) } {
+        while unsafe { (*prev).locked.load(Acquire) } {
             backoff.snooze();
         }
 
@@ -56,7 +56,7 @@ impl RawLock for ClhLock {
     }
 
     unsafe fn unlock(&self, token: Self::Token) {
-        (*token.0).locked.store(false, Ordering::Release);
+        (*token.0).locked.store(false, Release);
     }
 }
diff --git a/src/lock/mcslock.rs b/src/lock/mcslock.rs
index df23e523bc..cd925cc17b 100644
--- a/src/lock/mcslock.rs
+++ b/src/lock/mcslock.rs
@@ -1,5 +1,5 @@
 use core::ptr;
-use core::sync::atomic::{AtomicBool, AtomicPtr, Ordering};
+use core::sync::atomic::{AtomicBool, AtomicPtr, Ordering::*};
 
 use crossbeam_utils::{Backoff, CachePadded};
 
@@ -41,7 +41,7 @@ impl RawLock for McsLock {
     fn lock(&self) -> Self::Token {
         let node = Box::into_raw(Box::new(CachePadded::new(Node::new())));
-        let prev = self.tail.swap(node, Ordering::AcqRel);
+        let prev = self.tail.swap(node, AcqRel);
 
         if prev.is_null() {
             return Token(node);
@@ -50,12 +50,12 @@ impl RawLock for McsLock {
         // SAFETY: `prev` is valid, so is not the initial pointer. Hence, it is a pointer from
         // `swap()` by another thread's `lock()`, and that thread guarantees that `prev` will not be
         // freed until this store is complete.
-        unsafe { (*prev).next.store(node, Ordering::Release) };
+        unsafe { (*prev).next.store(node, Release) };
 
         let backoff = Backoff::new();
         // SAFETY: `node` was made valid above. Since other threads will not free `node`, it still
         // points to valid memory.
-        while unsafe { (*node).locked.load(Ordering::Acquire) } {
+        while unsafe { (*node).locked.load(Acquire) } {
             backoff.snooze();
         }
 
@@ -64,12 +64,12 @@ impl RawLock for McsLock {
     unsafe fn unlock(&self, token: Self::Token) {
         let node = token.0;
-        let mut next = (*node).next.load(Ordering::Acquire);
+        let mut next = (*node).next.load(Acquire);
 
         if next.is_null() {
             if self
                 .tail
-                .compare_exchange(node, ptr::null_mut(), Ordering::Release, Ordering::Relaxed)
+                .compare_exchange(node, ptr::null_mut(), Release, Relaxed)
                 .is_ok()
             {
                 // SAFETY: Since `node` was the `tail`, there is no other thread blocked by this
@@ -79,7 +79,7 @@ impl RawLock for McsLock {
             }
 
             while {
-                next = (*node).next.load(Ordering::Acquire);
+                next = (*node).next.load(Acquire);
                 next.is_null()
             } {}
         }
@@ -87,7 +87,7 @@ impl RawLock for McsLock {
         // SAFETY: Since `next` is not null, the thread that made `next` has finished access to
         // `node`, hence we have unique access to it.
        drop(Box::from_raw(node));
-        (*next).locked.store(false, Ordering::Release);
+        (*next).locked.store(false, Release);
     }
 }
diff --git a/src/lock/mcsparkinglock.rs b/src/lock/mcsparkinglock.rs
index 53acae717f..05eecb201d 100644
--- a/src/lock/mcsparkinglock.rs
+++ b/src/lock/mcsparkinglock.rs
@@ -1,5 +1,5 @@
 use core::ptr;
-use core::sync::atomic::{AtomicBool, AtomicPtr, Ordering};
+use core::sync::atomic::{AtomicBool, AtomicPtr, Ordering::*};
 use std::thread::{self, Thread};
 
 use crossbeam_utils::CachePadded;
 
@@ -44,17 +44,17 @@ impl RawLock for McsParkingLock {
     fn lock(&self) -> Self::Token {
         let node = Box::into_raw(Box::new(CachePadded::new(Node::new())));
-        let prev = self.tail.swap(node, Ordering::AcqRel);
+        let prev = self.tail.swap(node, AcqRel);
 
         if prev.is_null() {
             return Token(node);
         }
 
         // SAFETY: See safety of McsLock::lock().
-        unsafe { (*prev).next.store(node, Ordering::Release) };
+        unsafe { (*prev).next.store(node, Release) };
 
         // SAFETY: See safety of McsLock::lock().
-        while unsafe { (*node).locked.load(Ordering::Acquire) } {
+        while unsafe { (*node).locked.load(Acquire) } {
             thread::park();
         }
 
@@ -63,12 +63,12 @@ impl RawLock for McsParkingLock {
     unsafe fn unlock(&self, token: Self::Token) {
         let node = token.0;
-        let mut next = (*node).next.load(Ordering::Acquire);
+        let mut next = (*node).next.load(Acquire);
 
         if next.is_null() {
             if self
                 .tail
-                .compare_exchange(node, ptr::null_mut(), Ordering::Release, Ordering::Relaxed)
+                .compare_exchange(node, ptr::null_mut(), Release, Relaxed)
                 .is_ok()
             {
                 // SAFETY: See safety of McsLock::unlock().
@@ -77,7 +77,7 @@ impl RawLock for McsParkingLock {
             }
 
             while {
-                next = (*node).next.load(Ordering::Acquire);
+                next = (*node).next.load(Acquire);
                 next.is_null()
             } {}
        }
@@ -85,7 +85,7 @@ impl RawLock for McsParkingLock {
         // SAFETY: See safety of McsLock::unlock().
         drop(Box::from_raw(node));
         let thread = (*next).thread.clone();
-        (*next).locked.store(false, Ordering::Release);
+        (*next).locked.store(false, Release);
         thread.unpark();
     }
 }
diff --git a/src/lock/seqlock.rs b/src/lock/seqlock.rs
index 7f475cc80e..6620f3dc68 100644
--- a/src/lock/seqlock.rs
+++ b/src/lock/seqlock.rs
@@ -2,7 +2,7 @@
 use core::mem;
 use core::ops::Deref;
-use core::sync::atomic::{fence, AtomicUsize, Ordering};
+use core::sync::atomic::{fence, AtomicUsize, Ordering::*};
 
 use crossbeam_utils::Backoff;
 
@@ -25,19 +25,14 @@ impl RawSeqLock {
         let backoff = Backoff::new();
 
         loop {
-            let seq = self.seq.load(Ordering::Relaxed);
+            let seq = self.seq.load(Relaxed);
             if seq & 1 == 0
                 && self
                     .seq
-                    .compare_exchange(
-                        seq,
-                        seq.wrapping_add(1),
-                        Ordering::Acquire,
-                        Ordering::Relaxed,
-                    )
+                    .compare_exchange(seq, seq.wrapping_add(1), Acquire, Relaxed)
                     .is_ok()
             {
-                fence(Ordering::Release);
+                fence(Release);
                 return seq;
             }
 
@@ -47,7 +42,7 @@ impl RawSeqLock {
 
     /// Releases a writer's lock.
     pub fn write_unlock(&self, seq: usize) {
-        self.seq.store(seq.wrapping_add(2), Ordering::Release);
+        self.seq.store(seq.wrapping_add(2), Release);
     }
 
     /// Acquires a reader's lock.
@@ -55,7 +50,7 @@ impl RawSeqLock {
         let backoff = Backoff::new();
 
         loop {
-            let seq = self.seq.load(Ordering::Acquire);
+            let seq = self.seq.load(Acquire);
             if seq & 1 == 0 {
                 return seq;
             }
@@ -66,9 +61,9 @@ impl RawSeqLock {
 
     /// Releases a reader's lock and validates the read.
     pub fn read_validate(&self, seq: usize) -> bool {
-        fence(Ordering::Acquire);
+        fence(Acquire);
 
-        seq == self.seq.load(Ordering::Relaxed)
+        seq == self.seq.load(Relaxed)
     }
 
     /// # Safety
@@ -77,18 +72,13 @@ impl RawSeqLock {
     pub unsafe fn upgrade(&self, seq: usize) -> Result<(), ()> {
         if self
             .seq
-            .compare_exchange(
-                seq,
-                seq.wrapping_add(1),
-                Ordering::Acquire,
-                Ordering::Relaxed,
-            )
+            .compare_exchange(seq, seq.wrapping_add(1), Acquire, Relaxed)
             .is_err()
         {
             return Err(());
         }
 
-        fence(Ordering::Release);
+        fence(Release);
         Ok(())
     }
 }
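
Usage sketch (not part of the patch): the hunks above reformat the seqlock's writer and reader paths. For readers new to the protocol, the loop below shows how `read_lock` and `read_validate` are meant to be paired, using only the `RawSeqLock` methods visible in this diff and assuming `read_lock` is public and returns the observed sequence number, as its definition suggests. The actual data access is elided because a correct seqlock reader also needs atomic reads of the protected data; this only illustrates the retry protocol.

    fn optimistic_read(lock: &RawSeqLock) -> u64 {
        loop {
            // `read_lock` spins until the sequence number is even (no active writer).
            let seq = lock.read_lock();

            let snapshot = 0u64; // read the protected data here (elided in this sketch)

            // `read_validate` issues an `Acquire` fence and checks that no writer
            // incremented the sequence number while we were reading.
            if lock.read_validate(seq) {
                return snapshot;
            }
            // A writer intervened; retry.
        }
    }
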
diff --git a/src/lock/spinlock.rs b/src/lock/spinlock.rs
index 0bc1ed95cc..90208a6659 100644
--- a/src/lock/spinlock.rs
+++ b/src/lock/spinlock.rs
@@ -1,4 +1,4 @@
-use core::sync::atomic::{AtomicBool, Ordering};
+use core::sync::atomic::{AtomicBool, Ordering::*};
 
 use crossbeam_utils::Backoff;
 
@@ -26,7 +26,7 @@ impl RawLock for SpinLock {
         while self
             .inner
-            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
+            .compare_exchange(false, true, Acquire, Relaxed)
             .is_err()
         {
             backoff.snooze();
@@ -34,14 +34,14 @@ impl RawLock for SpinLock {
        }
     }
 
     unsafe fn unlock(&self, _token: ()) {
-        self.inner.store(false, Ordering::Release);
+        self.inner.store(false, Release);
     }
 }
 
 impl RawTryLock for SpinLock {
     fn try_lock(&self) -> Result<(), ()> {
         self.inner
-            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
+            .compare_exchange(false, true, Acquire, Relaxed)
             .map(|_| ())
             .map_err(|_| ())
     }
diff --git a/src/lock/ticketlock.rs b/src/lock/ticketlock.rs
index ba8fe579e7..6cbc0e8754 100644
--- a/src/lock/ticketlock.rs
+++ b/src/lock/ticketlock.rs
@@ -1,4 +1,4 @@
-use core::sync::atomic::{AtomicUsize, Ordering};
+use core::sync::atomic::{AtomicUsize, Ordering::*};
 
 use crossbeam_utils::Backoff;
 
@@ -24,10 +24,10 @@ impl RawLock for TicketLock {
     type Token = usize;
 
     fn lock(&self) -> usize {
-        let ticket = self.next.fetch_add(1, Ordering::Relaxed);
+        let ticket = self.next.fetch_add(1, Relaxed);
         let backoff = Backoff::new();
 
-        while self.curr.load(Ordering::Acquire) != ticket {
+        while self.curr.load(Acquire) != ticket {
             backoff.snooze();
         }
 
@@ -35,7 +35,7 @@ impl RawLock for TicketLock {
     }
 
     unsafe fn unlock(&self, ticket: usize) {
-        self.curr.store(ticket.wrapping_add(1), Ordering::Release);
+        self.curr.store(ticket.wrapping_add(1), Release);
     }
 }
diff --git a/src/lockfree/list.rs b/src/lockfree/list.rs
index 36bf4c2a59..6f2355504e 100644
--- a/src/lockfree/list.rs
+++ b/src/lockfree/list.rs
@@ -1,8 +1,8 @@
 //! Lock-free singly linked list.
 
-use core::cmp::Ordering::{Equal, Greater, Less};
+use core::cmp::Ordering::*;
 use core::mem;
-use core::sync::atomic::Ordering;
+use core::sync::atomic::Ordering::*;
 
 use crossbeam_epoch::{Atomic, Guard, Owned, Shared};
 
@@ -108,7 +108,7 @@ where
             let Some(curr_node) = (unsafe { self.curr.as_ref() }) else {
                 break false;
             };
-            let next = curr_node.next.load(Ordering::Acquire, guard);
+            let next = curr_node.next.load(Acquire, guard);
 
             // - finding stage is done if cursor.curr advancement stops
             // - advance cursor.curr if (.next is marked) || (cursor.curr < key)
@@ -139,20 +139,14 @@ where
 
         // cleanup marked nodes between prev and curr
         self.prev
-            .compare_exchange(
-                prev_next,
-                self.curr,
-                Ordering::Release,
-                Ordering::Relaxed,
-                guard,
-            )
+            .compare_exchange(prev_next, self.curr, Release, Relaxed, guard)
             .map_err(|_| ())?;
 
         // defer_destroy from cursor.prev.load() to cursor.curr (exclusive)
         let mut node = prev_next;
         while node.with_tag(0) != self.curr {
             // SAFETY: All nodes in the unlinked chain are not null.
-            let next = unsafe { node.deref() }.next.load(Ordering::Relaxed, guard);
+            let next = unsafe { node.deref() }.next.load(Relaxed, guard);
             // SAFETY: we unlinked the chain with above CAS.
             unsafe { guard.defer_destroy(node) };
             node = next;
@@ -170,12 +164,12 @@ where
         let Some(curr_node) = (unsafe { self.curr.as_ref() }) else {
             return Ok(false);
         };
-        let mut next = curr_node.next.load(Ordering::Acquire, guard);
+        let mut next = curr_node.next.load(Acquire, guard);
         if next.tag() != 0 {
             next = next.with_tag(0);
             self.prev
-                .compare_exchange(self.curr, next, Ordering::Release, Ordering::Relaxed, guard)
+                .compare_exchange(self.curr, next, Release, Relaxed, guard)
                 .map_err(|_| ())?;
             unsafe { guard.defer_destroy(self.curr) };
             self.curr = next;
@@ -202,21 +196,25 @@ where
             };
             match curr_node.key.cmp(key) {
                 Less => {
-                    self.curr = curr_node.next.load(Ordering::Acquire, guard);
+                    self.curr = curr_node.next.load(Acquire, guard);
                     // NOTE: unnecessary (this function is expected to be used only for `lookup`)
                     self.prev = &curr_node.next;
                     continue;
                 }
-                Equal => break curr_node.next.load(Ordering::Relaxed, guard).tag() == 0,
+                Equal => break curr_node.next.load(Relaxed, guard).tag() == 0,
                 Greater => break false,
             }
         })
     }
 
     /// Lookups the value.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the cursor's current value is a null.
     #[inline]
-    pub fn lookup(&self) -> Option<&'g V> {
-        unsafe { self.curr.as_ref() }.map(|n| &n.value)
+    pub fn lookup(&self) -> &'g V {
+        &unsafe { self.curr.as_ref() }.unwrap().value
     }
 
     /// Inserts a value.
@@ -226,14 +224,11 @@ where
         node: Owned<Node<K, V>>,
         guard: &'g Guard,
     ) -> Result<(), Owned<Node<K, V>>> {
-        node.next.store(self.curr, Ordering::Relaxed);
-        match self.prev.compare_exchange(
-            self.curr,
-            node,
-            Ordering::Release,
-            Ordering::Relaxed,
-            guard,
-        ) {
+        node.next.store(self.curr, Relaxed);
+        match self
+            .prev
+            .compare_exchange(self.curr, node, Release, Relaxed, guard)
+        {
             Ok(node) => {
                 self.curr = node;
                 Ok(())
@@ -250,14 +245,14 @@ where
 
         // Release: to release current view of the deleting thread on this mark.
         // Acquire: to ensure that if the latter CAS succeeds, then the thread that reads `next` through `prev` will be safe.
-        let next = curr_node.next.fetch_or(1, Ordering::AcqRel, guard);
+        let next = curr_node.next.fetch_or(1, AcqRel, guard);
         if next.tag() == 1 {
             return Err(());
         }
 
         if self
             .prev
-            .compare_exchange(self.curr, next, Ordering::Release, Ordering::Relaxed, guard)
+            .compare_exchange(self.curr, next, Release, Relaxed, guard)
             .is_ok()
         {
             // SAFETY: we are unlinker of curr. As the lifetime of the guard extends to the return
@@ -283,7 +278,7 @@ where
     /// Creates the head cursor.
     #[inline]
     pub fn head<'g>(&'g self, guard: &'g Guard) -> Cursor<'g, K, V> {
-        Cursor::new(&self.head, self.head.load(Ordering::Acquire, guard))
+        Cursor::new(&self.head, self.head.load(Acquire, guard))
     }
 
     /// Finds a key using the given find strategy.
@@ -307,7 +302,8 @@ where
     {
         let (found, cursor) = self.find(key, &find, guard);
         if found {
-            cursor.lookup()
+            // `found` means current node cannot be null, so lookup won't panic.
+            Some(cursor.lookup())
         } else {
             None
         }
diff --git a/src/lockfree/queue.rs b/src/lockfree/queue.rs
index 6b531b9968..06c95d3c26 100644
--- a/src/lockfree/queue.rs
+++ b/src/lockfree/queue.rs
@@ -6,15 +6,14 @@
 //! Algorithms. PODC 1996.
 
 use core::mem::{self, MaybeUninit};
-use core::sync::atomic::Ordering;
+use core::sync::atomic::Ordering::*;
 
 use crossbeam_epoch::{unprotected, Atomic, Guard, Owned, Shared};
 use crossbeam_utils::CachePadded;
 
 /// Michael-Scott queue.
 // The representation here is a singly-linked list, with a sentinel node at the front. In general
-// the `tail` pointer may lag behind the actual tail. Non-sentinel nodes are either all `Data` or
-// all `Blocked` (requests for data from blocked threads).
+// the `tail` pointer may lag behind the actual tail.
 #[derive(Debug)]
 pub struct Queue<T> {
     head: CachePadded<Atomic<Node<T>>>,
@@ -23,12 +22,12 @@ pub struct Queue<T> {
     tail: CachePadded<Atomic<Node<T>>>,
 }
 
 #[derive(Debug)]
 struct Node<T> {
-    /// The slot in which a value of type `T` can be stored.
+    /// The place in which a value of type `T` can be stored.
     ///
     /// The type of `data` is `MaybeUninit<T>` because a `Node<T>` doesn't always contain a `T`.
-    /// For example, the sentinel node in a queue never contains a value: its slot is always empty.
-    /// Other nodes start their life with a push operation and contain a value until it gets popped
-    /// out. After that such empty nodes get added to the collector for destruction.
+    /// For example, the initial sentinel node in a queue never contains a value: its data is always
+    /// uninitialized. Other nodes start their life with a push operation and contain a value until
+    /// it gets popped out.
     data: MaybeUninit<T>,
 
     next: Atomic<Node<T>>,
@@ -52,8 +51,8 @@ impl<T> Default for Queue<T> {
         })
         .into_shared(unsafe { unprotected() });
 
-        q.head.store(sentinel, Ordering::Relaxed);
-        q.tail.store(sentinel, Ordering::Relaxed);
+        q.head.store(sentinel, Relaxed);
+        q.tail.store(sentinel, Relaxed);
         q
     }
 }
@@ -74,44 +73,30 @@ impl<T> Queue<T> {
         loop {
             // We push onto the tail, so we'll start optimistically by looking there first.
-            let tail = self.tail.load(Ordering::Acquire, guard);
+            let tail = self.tail.load(Acquire, guard);
 
             // Attempt to push onto the `tail` snapshot; fails if `tail.next` has changed.
             let tail_ref = unsafe { tail.deref() };
-            let next = tail_ref.next.load(Ordering::Acquire, guard);
+            let next = tail_ref.next.load(Acquire, guard);
 
             // If `tail` is not the actual tail, try to "help" by moving the tail pointer forward.
             if !next.is_null() {
-                let _ = self.tail.compare_exchange(
-                    tail,
-                    next,
-                    Ordering::Release,
-                    Ordering::Relaxed,
-                    guard,
-                );
+                let _ = self
+                    .tail
+                    .compare_exchange(tail, next, Release, Relaxed, guard);
                 continue;
             }
 
             // looks like the actual tail; attempt to link at `tail.next`.
             if tail_ref
                 .next
-                .compare_exchange(
-                    Shared::null(),
-                    new,
-                    Ordering::Release,
-                    Ordering::Relaxed,
-                    guard,
-                )
+                .compare_exchange(Shared::null(), new, Release, Relaxed, guard)
                 .is_ok()
             {
                 // try to move the tail pointer forward.
-                let _ = self.tail.compare_exchange(
-                    tail,
-                    new,
-                    Ordering::Release,
-                    Ordering::Relaxed,
-                    guard,
-                );
+                let _ = self
+                    .tail
+                    .compare_exchange(tail, new, Release, Relaxed, guard);
                 break;
             }
         }
@@ -122,27 +107,23 @@ impl<T> Queue<T> {
     /// Returns `None` if the queue is observed to be empty.
     pub fn try_pop(&self, guard: &Guard) -> Option<T> {
         loop {
-            let head = self.head.load(Ordering::Acquire, guard);
-            let next = unsafe { head.deref() }.next.load(Ordering::Acquire, guard);
+            let head = self.head.load(Acquire, guard);
+            let next = unsafe { head.deref() }.next.load(Acquire, guard);
             let next_ref = unsafe { next.as_ref() }?;
 
             // Moves `tail` if it's stale. Relaxed load is enough because if tail == head, then the
             // messages for that node are already acquired.
-            let tail = self.tail.load(Ordering::Relaxed, guard);
+            let tail = self.tail.load(Relaxed, guard);
             if tail == head {
-                let _ = self.tail.compare_exchange(
-                    tail,
-                    next,
-                    Ordering::Release,
-                    Ordering::Relaxed,
-                    guard,
-                );
+                let _ = self
+                    .tail
+                    .compare_exchange(tail, next, Release, Relaxed, guard);
             }
 
             if self
                 .head
-                .compare_exchange(head, next, Ordering::Release, Ordering::Relaxed, guard)
+                .compare_exchange(head, next, Release, Relaxed, guard)
                 .is_ok()
             {
                 // Since the above `compare_exchange()` succeeded, `head` is detached from `self` so
@@ -211,8 +192,8 @@ mod test {
     pub fn is_empty(&self) -> bool {
         let guard = &pin();
-        let head = self.queue.head.load(Ordering::Acquire, guard);
-        let next = unsafe { head.deref() }.next.load(Ordering::Acquire, guard);
+        let head = self.queue.head.load(Acquire, guard);
+        let next = unsafe { head.deref() }.next.load(Acquire, guard);
         next.is_null()
     }
 
@@ -426,7 +407,6 @@ mod test {
         q.push(20);
         q.push(20);
         assert!(!q.is_empty());
-        assert!(!q.is_empty());
         assert!(q.try_pop().is_some());
     }
 }
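
Usage sketch (not part of the patch): a minimal driver for the Michael-Scott queue touched above, assuming the module is reachable as `crate::lockfree::queue::Queue`, that `push` takes the value plus a pinned `Guard` (matching the guard use in its body), and `try_pop(&Guard) -> Option<T>` as shown.

    use crossbeam_epoch::pin;

    use crate::lockfree::queue::Queue;

    fn demo() {
        // `Default` builds the queue with its initial sentinel node, as shown above.
        let queue = Queue::default();
        let guard = pin();

        queue.push(1, &guard);
        queue.push(2, &guard);

        // The queue is FIFO: the first value pushed comes out first.
        assert_eq!(queue.try_pop(&guard), Some(1));
        assert_eq!(queue.try_pop(&guard), Some(2));
        assert_eq!(queue.try_pop(&guard), None);
    }
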
diff --git a/src/lockfree/stack.rs b/src/lockfree/stack.rs
index 70c5cf45e8..806812189b 100644
--- a/src/lockfree/stack.rs
+++ b/src/lockfree/stack.rs
@@ -1,6 +1,6 @@
 use core::mem::{self, ManuallyDrop};
 use core::ptr;
-use core::sync::atomic::Ordering;
+use core::sync::atomic::Ordering::*;
 
 use crossbeam_epoch::{Atomic, Owned, Shared};
 
@@ -46,12 +46,12 @@ impl<T> Stack<T> {
         let guard = crossbeam_epoch::pin();
 
         loop {
-            let head = self.head.load(Ordering::Relaxed, &guard);
+            let head = self.head.load(Relaxed, &guard);
             n.next = head.as_raw();
 
             match self
                 .head
-                .compare_exchange(head, n, Ordering::Release, Ordering::Relaxed, &guard)
+                .compare_exchange(head, n, Release, Relaxed, &guard)
             {
                 Ok(_) => break,
                 Err(e) => n = e.new,
@@ -65,13 +65,13 @@ impl<T> Stack<T> {
     pub fn pop(&self) -> Option<T> {
         let guard = crossbeam_epoch::pin();
         loop {
-            let head = self.head.load(Ordering::Acquire, &guard);
+            let head = self.head.load(Acquire, &guard);
             let h = unsafe { head.as_ref() }?;
             let next = Shared::from(h.next);
 
             if self
                 .head
-                .compare_exchange(head, next, Ordering::Relaxed, Ordering::Relaxed, &guard)
+                .compare_exchange(head, next, Relaxed, Relaxed, &guard)
                 .is_ok()
             {
                 // Since the above `compare_exchange()` succeeded, `head` is detached from `self` so
@@ -93,7 +93,7 @@ impl<T> Stack<T> {
     /// Returns `true` if the stack is empty.
     pub fn is_empty(&self) -> bool {
         let guard = crossbeam_epoch::pin();
-        self.head.load(Ordering::Acquire, &guard).is_null()
+        self.head.load(Acquire, &guard).is_null()
     }
 }
 
@@ -129,6 +129,6 @@ mod test {
             }
         });
 
-        assert!(stack.pop().is_none());
+        assert!(stack.is_empty());
     }
 }
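
Usage sketch (not part of the patch): the last hunk asserts emptiness via `is_empty()` instead of `pop().is_none()`. For completeness, a minimal driver for the Treiber stack in this file, assuming it can be built via `Default` (as its test module appears to do) and is reachable as `crate::lockfree::stack::Stack`; `push`/`pop` pin the epoch internally, as shown above.

    use crate::lockfree::stack::Stack;

    fn demo() {
        let stack = Stack::default();

        stack.push(1);
        stack.push(2);

        // The stack is LIFO: the most recently pushed value comes out first.
        assert_eq!(stack.pop(), Some(2));
        assert_eq!(stack.pop(), Some(1));
        assert!(stack.is_empty());
    }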