
Commit

lock, lockfree: make Ordering args less verbose

* bump rust.
* minor cleanups
Lee-Janggun committed Dec 21, 2023
1 parent 47ed24e commit 417ba16
Showing 11 changed files with 106 additions and 140 deletions.
2 changes: 1 addition & 1 deletion rust-toolchain
@@ -1 +1 @@
-1.71.0
+1.74.1
18 changes: 9 additions & 9 deletions src/lock/api.rs
@@ -26,7 +26,7 @@ pub trait RawTryLock: RawLock {

/// A type-safe lock.
#[repr(C)]
-#[derive(Debug)]
+#[derive(Debug, Default)]
pub struct Lock<L: RawLock, T> {
lock: L,
data: UnsafeCell<T>,
@@ -111,17 +111,17 @@ pub struct LockGuard<'s, L: RawLock, T> {
token: ManuallyDrop<L::Token>,
}

-unsafe impl<'s, L: RawLock, T: Send> Send for LockGuard<'s, L, T> {}
-unsafe impl<'s, L: RawLock, T: Sync> Sync for LockGuard<'s, L, T> {}
+unsafe impl<L: RawLock, T: Send> Send for LockGuard<'_, L, T> {}
+unsafe impl<L: RawLock, T: Sync> Sync for LockGuard<'_, L, T> {}

-impl<'s, L: RawLock, T> LockGuard<'s, L, T> {
+impl<L: RawLock, T> LockGuard<'_, L, T> {
/// Returns the address of the referenced lock.
pub fn raw(&mut self) -> usize {
self.lock as *const _ as usize
}
}

-impl<'s, L: RawLock, T> Drop for LockGuard<'s, L, T> {
+impl<L: RawLock, T> Drop for LockGuard<'_, L, T> {
fn drop(&mut self) {
// SAFETY: `self.token` is not used anymore in this function, and as we are `drop`ing
// `self`, it is not used anymore.
@@ -133,7 +133,7 @@ impl<'s, L: RawLock, T> Drop for LockGuard<'s, L, T> {
}
}

-impl<'s, L: RawLock, T> Deref for LockGuard<'s, L, T> {
+impl<L: RawLock, T> Deref for LockGuard<'_, L, T> {
type Target = T;

fn deref(&self) -> &Self::Target {
@@ -142,7 +142,7 @@ impl<'s, L: RawLock, T> Deref for LockGuard<'s, L, T> {
}
}

-impl<'s, L: RawLock, T> DerefMut for LockGuard<'s, L, T> {
+impl<L: RawLock, T> DerefMut for LockGuard<'_, L, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
// SAFETY: Having a `LockGuard` means the underlying lock is held.
//
@@ -151,7 +151,7 @@ impl<'s, L: RawLock, T> DerefMut for LockGuard<'s, L, T> {
}
}

-impl<'s, L: RawLock, T> LockGuard<'s, L, T> {
+impl<L: RawLock, T> LockGuard<'_, L, T> {
/// Transforms a lock guard to an address.
pub fn into_raw(self) -> usize {
let ret = self.lock as *const _ as usize;
@@ -180,7 +180,7 @@ pub mod tests {

pub fn smoke<L: RawLock>() {
const LENGTH: usize = 1024;
-let d = Lock::<L, Vec<usize>>::new(vec![]);
+let d = Lock::<L, Vec<usize>>::default();

scope(|s| {
for i in 1..LENGTH {
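The api.rs changes above elide the unused `'s` lifetime from the guard impls and derive `Default` for `Lock`. A minimal standalone sketch of the `'_` style (the types here are illustrative, not the crate's):

use std::ops::Deref;

// A guard-like wrapper; the impl below never names the borrow's lifetime,
// so `'_` can stand in for it instead of declaring `'s` on the impl.
struct Guard<'s, T>(&'s T);

impl<T> Deref for Guard<'_, T> {
    type Target = T;

    fn deref(&self) -> &T {
        self.0
    }
}

fn main() {
    let value = vec![1, 2, 3];
    let guard = Guard(&value);
    assert_eq!(guard.len(), 3); // deref coercion through Guard
}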
8 changes: 4 additions & 4 deletions src/lock/clhlock.rs
@@ -1,4 +1,4 @@
-use core::sync::atomic::{AtomicBool, AtomicPtr, Ordering};
+use core::sync::atomic::{AtomicBool, AtomicPtr, Ordering::*};

use crossbeam_utils::{Backoff, CachePadded};

@@ -38,13 +38,13 @@ impl RawLock for ClhLock {

fn lock(&self) -> Self::Token {
let node = Box::into_raw(Box::new(CachePadded::new(Node::new(true))));
-let prev = self.tail.swap(node, Ordering::AcqRel);
+let prev = self.tail.swap(node, AcqRel);
let backoff = Backoff::new();

// SAFETY: `prev` is valid, as `self.tail` was valid at initialization and any `swap()` to
// it by other `lock()`s. Hence, it points to valid memory as the thread that made `prev`
// will not free it.
-while unsafe { (*prev).locked.load(Ordering::Acquire) } {
+while unsafe { (*prev).locked.load(Acquire) } {
backoff.snooze();
}

@@ -56,7 +56,7 @@ }
}

unsafe fn unlock(&self, token: Self::Token) {
-(*token.0).locked.store(false, Ordering::Release);
+(*token.0).locked.store(false, Release);
}
}

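With `Ordering::*` imported, call sites read `load(Acquire)` instead of `load(Ordering::Acquire)`. A minimal sketch of the spin-with-backoff pattern used in `ClhLock::lock()` above, assuming `crossbeam-utils` as a dependency:

use std::sync::atomic::{AtomicBool, Ordering::*};

use crossbeam_utils::Backoff;

// Spin until `locked` is cleared, snoozing between attempts; the Acquire load
// synchronizes with the Release store performed by the unlocking thread.
fn wait_until_unlocked(locked: &AtomicBool) {
    let backoff = Backoff::new();
    while locked.load(Acquire) {
        backoff.snooze();
    }
}

fn main() {
    let locked = AtomicBool::new(false);
    wait_until_unlocked(&locked); // returns immediately: never locked
}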
16 changes: 8 additions & 8 deletions src/lock/mcslock.rs
@@ -1,5 +1,5 @@
use core::ptr;
-use core::sync::atomic::{AtomicBool, AtomicPtr, Ordering};
+use core::sync::atomic::{AtomicBool, AtomicPtr, Ordering::*};

use crossbeam_utils::{Backoff, CachePadded};

@@ -41,7 +41,7 @@ impl RawLock for McsLock {

fn lock(&self) -> Self::Token {
let node = Box::into_raw(Box::new(CachePadded::new(Node::new())));
-let prev = self.tail.swap(node, Ordering::AcqRel);
+let prev = self.tail.swap(node, AcqRel);

if prev.is_null() {
return Token(node);
@@ -50,12 +50,12 @@ impl RawLock for McsLock {
// SAFETY: `prev` is valid, so is not the initial pointer. Hence, it is a pointer from
// `swap()` by another thread's `lock()`, and that thread guarantees that `prev` will not be
// freed until this store is complete.
-unsafe { (*prev).next.store(node, Ordering::Release) };
+unsafe { (*prev).next.store(node, Release) };

let backoff = Backoff::new();
// SAFETY: `node` was made valid above. Since other threads will not free `node`, it still
// points to valid memory.
-while unsafe { (*node).locked.load(Ordering::Acquire) } {
+while unsafe { (*node).locked.load(Acquire) } {
backoff.snooze();
}

@@ -64,12 +64,12 @@ impl RawLock for McsLock {

unsafe fn unlock(&self, token: Self::Token) {
let node = token.0;
-let mut next = (*node).next.load(Ordering::Acquire);
+let mut next = (*node).next.load(Acquire);

if next.is_null() {
if self
.tail
-.compare_exchange(node, ptr::null_mut(), Ordering::Release, Ordering::Relaxed)
+.compare_exchange(node, ptr::null_mut(), Release, Relaxed)
.is_ok()
{
// SAFETY: Since `node` was the `tail`, there is no other thread blocked by this
@@ -79,15 +79,15 @@ impl RawLock for McsLock {
}

while {
-next = (*node).next.load(Ordering::Acquire);
+next = (*node).next.load(Acquire);
next.is_null()
} {}
}

// SAFETY: Since `next` is not null, the thread that made `next` has finished access to
// `node`, hence we have unique access to it.
drop(Box::from_raw(node));
-(*next).locked.store(false, Ordering::Release);
+(*next).locked.store(false, Release);
}
}

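A single-threaded sketch of the tail-reset step in `McsLock::unlock()` above: a `compare_exchange` with the bare `Release`/`Relaxed` orderings that succeeds only if no later node was enqueued (simplified, not the full lock):

use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering::*};

fn main() {
    // A single queued "node" that is also the current tail.
    let node = Box::into_raw(Box::new(42u32));
    let tail = AtomicPtr::new(node);

    // No other node was enqueued after `node`, so the CAS resets the tail.
    assert!(tail
        .compare_exchange(node, ptr::null_mut(), Release, Relaxed)
        .is_ok());

    // SAFETY: `node` came from Box::into_raw above and is no longer reachable
    // through `tail`, so it is reclaimed exactly once here.
    unsafe { drop(Box::from_raw(node)) };
}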
16 changes: 8 additions & 8 deletions src/lock/mcsparkinglock.rs
@@ -1,5 +1,5 @@
use core::ptr;
-use core::sync::atomic::{AtomicBool, AtomicPtr, Ordering};
+use core::sync::atomic::{AtomicBool, AtomicPtr, Ordering::*};
use std::thread::{self, Thread};

use crossbeam_utils::CachePadded;
@@ -44,17 +44,17 @@ impl RawLock for McsParkingLock {

fn lock(&self) -> Self::Token {
let node = Box::into_raw(Box::new(CachePadded::new(Node::new())));
-let prev = self.tail.swap(node, Ordering::AcqRel);
+let prev = self.tail.swap(node, AcqRel);

if prev.is_null() {
return Token(node);
}

// SAFETY: See safety of McsLock::lock().
-unsafe { (*prev).next.store(node, Ordering::Release) };
+unsafe { (*prev).next.store(node, Release) };

// SAFETY: See safety of McsLock::lock().
-while unsafe { (*node).locked.load(Ordering::Acquire) } {
+while unsafe { (*node).locked.load(Acquire) } {
thread::park();
}

@@ -63,12 +63,12 @@ impl RawLock for McsParkingLock {

unsafe fn unlock(&self, token: Self::Token) {
let node = token.0;
-let mut next = (*node).next.load(Ordering::Acquire);
+let mut next = (*node).next.load(Acquire);

if next.is_null() {
if self
.tail
-.compare_exchange(node, ptr::null_mut(), Ordering::Release, Ordering::Relaxed)
+.compare_exchange(node, ptr::null_mut(), Release, Relaxed)
.is_ok()
{
// SAFETY: See safety of McsLock::unlock().
@@ -77,15 +77,15 @@ impl RawLock for McsParkingLock {
}

while {
-next = (*node).next.load(Ordering::Acquire);
+next = (*node).next.load(Acquire);
next.is_null()
} {}
}

// SAFETY: See safety of McsLock::unlock().
drop(Box::from_raw(node));
let thread = (*next).thread.clone();
-(*next).locked.store(false, Ordering::Release);
+(*next).locked.store(false, Release);
thread.unpark();
}
}
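A small sketch of the park/unpark handoff that `McsParkingLock` relies on above; the waiter re-checks the flag in a loop because `thread::park()` may return spuriously:

use std::sync::atomic::{AtomicBool, Ordering::*};
use std::sync::Arc;
use std::thread;

fn main() {
    let locked = Arc::new(AtomicBool::new(true));

    let waiter = {
        let locked = Arc::clone(&locked);
        thread::spawn(move || {
            // Block until the flag is cleared; park() may wake spuriously,
            // so the flag is always re-checked.
            while locked.load(Acquire) {
                thread::park();
            }
        })
    };

    locked.store(false, Release); // "unlock"
    waiter.thread().unpark();     // wake the parked waiter
    waiter.join().unwrap();
}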
30 changes: 10 additions & 20 deletions src/lock/seqlock.rs
@@ -2,7 +2,7 @@
use core::mem;
use core::ops::Deref;
-use core::sync::atomic::{fence, AtomicUsize, Ordering};
+use core::sync::atomic::{fence, AtomicUsize, Ordering::*};

use crossbeam_utils::Backoff;

@@ -25,19 +25,14 @@ impl RawSeqLock {
let backoff = Backoff::new();

loop {
-let seq = self.seq.load(Ordering::Relaxed);
+let seq = self.seq.load(Relaxed);
if seq & 1 == 0
&& self
.seq
-.compare_exchange(
-seq,
-seq.wrapping_add(1),
-Ordering::Acquire,
-Ordering::Relaxed,
-)
+.compare_exchange(seq, seq.wrapping_add(1), Acquire, Relaxed)
.is_ok()
{
-fence(Ordering::Release);
+fence(Release);
return seq;
}

@@ -47,15 +42,15 @@ impl RawSeqLock {

/// Releases a writer's lock.
pub fn write_unlock(&self, seq: usize) {
-self.seq.store(seq.wrapping_add(2), Ordering::Release);
+self.seq.store(seq.wrapping_add(2), Release);
}

/// Acquires a reader's lock.
pub fn read_begin(&self) -> usize {
let backoff = Backoff::new();

loop {
-let seq = self.seq.load(Ordering::Acquire);
+let seq = self.seq.load(Acquire);
if seq & 1 == 0 {
return seq;
}
@@ -66,9 +61,9 @@ impl RawSeqLock {

/// Releases a reader's lock and validates the read.
pub fn read_validate(&self, seq: usize) -> bool {
-fence(Ordering::Acquire);
+fence(Acquire);

-seq == self.seq.load(Ordering::Relaxed)
+seq == self.seq.load(Relaxed)
}

/// # Safety
@@ -77,18 +72,13 @@ impl RawSeqLock {
pub unsafe fn upgrade(&self, seq: usize) -> Result<(), ()> {
if self
.seq
-.compare_exchange(
-seq,
-seq.wrapping_add(1),
-Ordering::Acquire,
-Ordering::Relaxed,
-)
+.compare_exchange(seq, seq.wrapping_add(1), Acquire, Relaxed)
.is_err()
{
return Err(());
}

-fence(Ordering::Release);
+fence(Release);
Ok(())
}
}
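A hypothetical reader loop over the `RawSeqLock` shown above (assumes the type is in scope; `read` stands for whatever optimistic read the caller performs, which may observe torn data while a writer is in flight):

// Retry the read until the sequence number is unchanged, i.e. no writer ran
// between read_begin() and read_validate().
fn read_optimistically<T>(lock: &RawSeqLock, read: impl Fn() -> T) -> T {
    loop {
        let seq = lock.read_begin(); // waits out any active writer
        let value = read();          // optimistic read, may be torn
        if lock.read_validate(seq) {
            return value;            // no writer intervened: value is consistent
        }
        // Otherwise a writer ran concurrently; discard `value` and retry.
    }
}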
8 changes: 4 additions & 4 deletions src/lock/spinlock.rs
@@ -1,4 +1,4 @@
-use core::sync::atomic::{AtomicBool, Ordering};
+use core::sync::atomic::{AtomicBool, Ordering::*};

use crossbeam_utils::Backoff;

@@ -26,22 +26,22 @@ impl RawLock for SpinLock {

while self
.inner
-.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
+.compare_exchange(false, true, Acquire, Relaxed)
.is_err()
{
backoff.snooze();
}
}

unsafe fn unlock(&self, _token: ()) {
-self.inner.store(false, Ordering::Release);
+self.inner.store(false, Release);
}
}

impl RawTryLock for SpinLock {
fn try_lock(&self) -> Result<(), ()> {
self.inner
-.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
+.compare_exchange(false, true, Acquire, Relaxed)
.map(|_| ())
.map_err(|_| ())
}
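A single-threaded sketch of the test-and-set at the heart of `SpinLock::try_lock()` above, written with the glob-imported orderings this commit switches to:

use std::sync::atomic::{AtomicBool, Ordering::*};

fn main() {
    let held = AtomicBool::new(false);

    // First attempt flips false -> true and acquires the "lock".
    assert!(held.compare_exchange(false, true, Acquire, Relaxed).is_ok());

    // Second attempt fails while the lock is held.
    assert!(held.compare_exchange(false, true, Acquire, Relaxed).is_err());

    // Releasing is a plain Release store.
    held.store(false, Release);
}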
8 changes: 4 additions & 4 deletions src/lock/ticketlock.rs
@@ -1,4 +1,4 @@
-use core::sync::atomic::{AtomicUsize, Ordering};
+use core::sync::atomic::{AtomicUsize, Ordering::*};

use crossbeam_utils::Backoff;

@@ -24,18 +24,18 @@ impl RawLock for TicketLock {
type Token = usize;

fn lock(&self) -> usize {
-let ticket = self.next.fetch_add(1, Ordering::Relaxed);
+let ticket = self.next.fetch_add(1, Relaxed);
let backoff = Backoff::new();

-while self.curr.load(Ordering::Acquire) != ticket {
+while self.curr.load(Acquire) != ticket {
backoff.snooze();
}

ticket
}

unsafe fn unlock(&self, ticket: usize) {
-self.curr.store(ticket.wrapping_add(1), Ordering::Release);
+self.curr.store(ticket.wrapping_add(1), Release);
}
}

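A single-threaded sketch of the ticket protocol in `TicketLock` above: take a ticket with a relaxed `fetch_add`, wait for `curr` to reach it, and unlock by publishing the next ticket:

use std::sync::atomic::{AtomicUsize, Ordering::*};

fn main() {
    let next = AtomicUsize::new(0);
    let curr = AtomicUsize::new(0);

    let ticket = next.fetch_add(1, Relaxed); // take ticket 0
    assert_eq!(curr.load(Acquire), ticket);  // it is immediately our turn

    // Unlock: hand the lock to the holder of the following ticket.
    curr.store(ticket.wrapping_add(1), Release);
    assert_eq!(curr.load(Relaxed), 1);
}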
(3 more changed files not shown)
