Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[feat] add rwlock, condvar, semaphore and barrier in axsync #186

Open
wants to merge 5 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion api/axfeat/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ smp = ["axhal/smp", "axruntime/smp", "axtask?/smp", "kspin/smp"]
fp_simd = ["axhal/fp_simd"]

# Interrupts
irq = ["axhal/irq", "axruntime/irq", "axtask?/irq"]
irq = ["axhal/irq", "axruntime/irq", "axtask?/irq", "axsync/irq"]

# Memory
alloc = ["axalloc", "axruntime/alloc"]
Expand Down
5 changes: 4 additions & 1 deletion modules/axsync/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -10,12 +10,15 @@ repository = "https://github.com/arceos-org/arceos/tree/main/modules/axsync"
documentation = "https://arceos-org.github.io/arceos/axsync/index.html"

[features]
multitask = ["axtask/multitask"]
multitask = ["axtask/multitask", "dep:axhal"]
irq = ["axtask/irq"]
default = []

[dependencies]
kspin = "0.1"
cfg-if = "1.0"
axtask = { workspace = true }
axhal = { workspace = true, optional = true }

[dev-dependencies]
rand = "0.8"
Expand Down
108 changes: 108 additions & 0 deletions modules/axsync/src/barrier.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,108 @@
//! Synchronization primitive allowing multiple threads to synchronize the
//! beginning of some computation.
//!
//! Implementation adapted from the 'Barrier' type of the standard library. See:
//! <https://doc.rust-lang.org/std/sync/struct.Barrier.html>
//!
//! Note: [`Barrier`] is not available when the `multitask` feature is disabled.

#[cfg(test)]
mod tests;

use core::fmt;

use crate::condvar::Condvar;
use crate::Mutex;

/// A barrier enables multiple threads to synchronize the beginning
/// of some computation.
pub struct Barrier {
    /// Protects the shared waiter count and generation counter.
    lock: Mutex<BarrierState>,
    /// Blocked waiters park here until the leader advances the generation.
    cvar: Condvar,
    /// Number of threads that must call `wait()` before all are released.
    num_threads: usize,
}

// The inner state of the barrier: how many threads have arrived in the
// current round, and which round (generation) is in progress.
struct BarrierState {
    // Threads that have reached the barrier in the current generation.
    count: usize,
    // Bumped (wrapping) each time the barrier releases, so a waiter from a
    // previous round cannot be confused with one from the next round.
    generation_id: usize,
}

/// A `BarrierWaitResult` is returned by [`Barrier::wait()`] when all threads
/// in the [`Barrier`] have rendezvoused.
///
/// The wrapped `bool` is `true` for exactly one (arbitrary) waiter — the
/// "leader" — and `false` for every other waiter.
pub struct BarrierWaitResult(bool);

impl fmt::Debug for Barrier {
    /// Formats as `Barrier { .. }`, without exposing the internal
    /// synchronization state.
    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        formatter.debug_struct("Barrier").finish_non_exhaustive()
    }
}

impl Barrier {
    /// Creates a new barrier that can block a given number of threads.
    ///
    /// A barrier will block `n`-1 threads which call [`wait()`] and then wake
    /// up all threads at once when the `n`th thread calls [`wait()`].
    ///
    /// [`wait()`]: Barrier::wait
    pub const fn new(n: usize) -> Self {
        Self {
            lock: Mutex::new(BarrierState {
                count: 0,
                generation_id: 0,
            }),
            cvar: Condvar::new(),
            num_threads: n,
        }
    }

    /// Blocks the current thread until all threads have rendezvoused here.
    ///
    /// Barriers are re-usable after all threads have rendezvoused once, and can
    /// be used continuously.
    ///
    /// A single (arbitrary) thread will receive a [`BarrierWaitResult`] that
    /// returns `true` from [`BarrierWaitResult::is_leader()`] when returning
    /// from this function, and all other threads will receive a result that
    /// will return `false` from [`BarrierWaitResult::is_leader()`].
    pub fn wait(&self) -> BarrierWaitResult {
        let mut lock = self.lock.lock();
        lock.count += 1;

        if lock.count < self.num_threads {
            // Not the leader: sleep until the leader bumps `generation_id`.
            // Waiting on the generation (rather than on `count`) means a
            // thread that re-enters the barrier for the *next* round cannot
            // spuriously release waiters still parked in this round.
            let local_gen = lock.generation_id;
            let _guard = self
                .cvar
                .wait_while(lock, |state| local_gen == state.generation_id);
            BarrierWaitResult(false)
        } else {
            // This thread is the leader, and is responsible for resetting the
            // count, incrementing the generation, and waking all waiters.
            lock.count = 0;
            // `wrapping_add` so the counter never overflows/panics; waiters
            // only ever compare for (in)equality against a stale copy.
            lock.generation_id = lock.generation_id.wrapping_add(1);
            self.cvar.notify_all();
            BarrierWaitResult(true)
        }
    }
}

impl fmt::Debug for BarrierWaitResult {
    /// Shows whether this result belongs to the leader thread.
    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        formatter
            .debug_struct("BarrierWaitResult")
            .field("is_leader", &self.0)
            .finish()
    }
}

impl BarrierWaitResult {
    /// Returns whether this thread from [`wait`] is the "leader thread".
    ///
    /// Only one thread will have `true` returned from their result, all other
    /// threads will have `false` returned.
    ///
    /// [`wait`]: Barrier::wait
    pub fn is_leader(&self) -> bool {
        self.0
    }
}
102 changes: 102 additions & 0 deletions modules/axsync/src/barrier/tests.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,102 @@
use axtask as thread;

use crate::Barrier;

/// Number of tasks that rendezvous at the barrier in the stress test.
const NUM_TASKS: u32 = 10;
/// Rounds of rendezvous per task; exercises barrier reuse across generations.
const NUM_ITERS: u32 = 10_000;

#[test]
fn test_barrier() {
    // Serialize tests and make sure the scheduler is initialized exactly once.
    let _lock = crate::tests::SEQ.lock();
    crate::tests::INIT.call_once(thread::init_scheduler);

    static BARRIER: Barrier = Barrier::new(NUM_TASKS as usize);

    // Each task meets the others at the barrier `NUM_ITERS` times in a row.
    fn rendezvous() {
        for _ in 0..NUM_ITERS {
            BARRIER.wait();
        }
    }

    // Spawn all participants, then block until every one has finished.
    let handles: Vec<_> = (0..NUM_TASKS).map(|_| thread::spawn(rendezvous)).collect();
    for handle in handles {
        handle.join();
    }

    println!("Barrier test OK");
}

#[test]
fn test_wait_result() {
    // Serialize tests and make sure the scheduler is initialized exactly once.
    let _lock = crate::tests::SEQ.lock();
    crate::tests::INIT.call_once(thread::init_scheduler);

    static BARRIER: Barrier = Barrier::new(1);

    // With `n == 1` every call releases immediately, so the first thread to
    // call `wait` will be the leader.
    assert!(BARRIER.wait().is_leader());

    // Since the barrier is reusable, the next thread to call `wait` will also
    // be the leader.
    assert!(BARRIER.wait().is_leader());

    static BARRIER2: Barrier = Barrier::new(2);

    thread::spawn(|| {
        // The spawned task arrives second and completes the rendezvous,
        // so it is the leader.
        assert!(BARRIER2.wait().is_leader());
    });

    // The first thread to call `wait` won't be the leader.
    assert!(!BARRIER2.wait().is_leader());

    // Give the spawned task a chance to run its assertion before we return.
    thread::yield_now();

    println!("BarrierWaitResult test OK");
}

#[test]
fn test_barrier_wait_result() {
    use std::sync::mpsc::{channel, TryRecvError};
    use std::sync::Arc;

    // Serialize tests and make sure the scheduler is initialized exactly once.
    let _lock = crate::tests::SEQ.lock();
    crate::tests::INIT.call_once(thread::init_scheduler);

    let barrier = Arc::new(Barrier::new(NUM_TASKS as _));
    let (tx, rx) = channel();

    let mut join_handlers = Vec::new();

    // Spawn NUM_TASKS - 1 waiters; each one reports its leadership flag over
    // the channel once it gets past the barrier.
    for _ in 0..NUM_TASKS - 1 {
        let c = barrier.clone();
        let tx = tx.clone();
        join_handlers.push(thread::spawn(move || {
            tx.send(c.wait().is_leader()).unwrap();
        }));
    }

    // At this point, all spawned threads should be blocked,
    // so we shouldn't get anything from the port.
    // NOTE(review): this assumes no spawned task has passed the barrier yet —
    // the rendezvous still needs this thread — but confirm the assertion also
    // holds under a preemptive (irq-enabled) scheduler.
    assert!(matches!(rx.try_recv(), Err(TryRecvError::Empty)));

    let mut leader_found = barrier.wait().is_leader();

    // Wait for all threads to finish.
    for join_handler in join_handlers {
        join_handler.join();
    }

    // Now, the barrier is cleared and we should get data.
    // Exactly one of the NUM_TASKS participants may report leadership.
    for _ in 0..NUM_TASKS - 1 {
        if rx.recv().unwrap() {
            assert!(!leader_found);
            leader_found = true;
        }
    }
    assert!(leader_found);
}
Loading
Loading