Fine-tune the Ordering for all the atomics #447

Open
wants to merge 1 commit into base: master
11 changes: 8 additions & 3 deletions samplecode/localattestation/attestation/src/func.rs
@@ -45,18 +45,23 @@ extern {
dest_enclave_id:sgx_enclave_id_t) -> sgx_status_t;
}

// For AtomicPtr, the load and store methods only read and write the address
// itself. Loading the pointed-to contents is a separate operation that is
// neither atomic nor safe. Whenever the contents behind the pointer are
// accessed, Acquire/Release ordering should be used so the reader never
// observes uninitialized memory.
static CALLBACK_FN: AtomicPtr<()> = AtomicPtr::new(0 as * mut ());

pub fn init(cb: Callback) {
let ptr = CALLBACK_FN.load(Ordering::Relaxed);
let ptr = CALLBACK_FN.load(Ordering::Acquire);
if ptr.is_null() {
let ptr: * mut Callback = Box::into_raw(Box::new(cb));
CALLBACK_FN.store(ptr as * mut (), Ordering::Relaxed);
CALLBACK_FN.store(ptr as * mut (), Ordering::Release);
}
}

fn get_callback() -> Option<&'static Callback>{
let ptr = CALLBACK_FN.load(Ordering::Relaxed) as *mut Callback;
let ptr = CALLBACK_FN.load(Ordering::Acquire) as *mut Callback;
if ptr.is_null() {
return None;
}
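As a side note on the reasoning above, here is a minimal, self-contained sketch of the Release/Acquire publication pattern (not part of the diff; `Callback` is a stand-in `fn` type chosen for illustration):

```rust
use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering};

type Callback = fn(u32) -> u32; // hypothetical callback type, illustration only

static CALLBACK_SLOT: AtomicPtr<Callback> = AtomicPtr::new(ptr::null_mut());

fn publish(cb: Callback) {
    // Fully initialize the heap allocation first, then publish its address
    // with Release so the initialization happens-before any Acquire load.
    let raw = Box::into_raw(Box::new(cb));
    CALLBACK_SLOT.store(raw, Ordering::Release);
}

fn call(arg: u32) -> Option<u32> {
    // Acquire pairs with the Release store above; dereferencing the pointer
    // therefore never observes uninitialized memory.
    let raw = CALLBACK_SLOT.load(Ordering::Acquire);
    if raw.is_null() {
        None
    } else {
        Some(unsafe { (*raw)(arg) })
    }
}
```

With `Relaxed`, the store of the address could in principle become visible before the boxed value is initialized, which is exactly the hazard the comment warns about.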
11 changes: 8 additions & 3 deletions sgx_no_tstd/src/lib.rs
@@ -45,6 +45,11 @@ fn begin_panic_handler(_info: &PanicInfo<'_>) -> ! {
#[no_mangle]
unsafe extern "C" fn rust_eh_personality() {}

/// Note about memory ordering:
///
/// HOOK stores the allocation error handler. The handler's memory must be fully
/// initialized before its address is stored into HOOK, so Release (on the store)
/// paired with Acquire (on the load) is enough.
static HOOK: AtomicPtr<()> = AtomicPtr::new(ptr::null_mut());

/// Registers a custom allocation error hook, replacing any that was previously registered.
@@ -59,7 +64,7 @@ static HOOK: AtomicPtr<()> = AtomicPtr::new(ptr::null_mut())
///
/// The allocation error hook is a global resource.
pub fn set_alloc_error_hook(hook: fn(Layout)) {
HOOK.store(hook as *mut (), Ordering::SeqCst);
HOOK.store(hook as *mut (), Ordering::Release);
}

/// Unregisters the current allocation error hook, returning it.
@@ -68,7 +73,7 @@ pub fn set_alloc_error_hook(hook: fn(Layout)) {
///
/// If no custom hook is registered, the default hook will be returned.
pub fn take_alloc_error_hook() -> fn(Layout) {
let hook = HOOK.swap(ptr::null_mut(), Ordering::SeqCst);
let hook = HOOK.swap(ptr::null_mut(), Ordering::AcqRel);
if hook.is_null() {
default_alloc_error_hook
} else {
@@ -80,7 +85,7 @@ fn default_alloc_error_hook(_layout: Layout) {}

#[alloc_error_handler]
pub fn rust_oom(layout: Layout) -> ! {
let hook = HOOK.load(Ordering::SeqCst);
let hook = HOOK.load(Ordering::Acquire);
let hook: fn(Layout) = if hook.is_null() {
default_alloc_error_hook
} else {
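The `swap` in `take_alloc_error_hook` is a take-and-own handoff. A rough sketch of that shape under assumed types (a toy boxed payload rather than the SDK's `fn(Layout)` hook):

```rust
use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering};

struct Payload {
    value: u64, // toy field; stands in for whatever the pointer protects
}

static SLOT: AtomicPtr<Payload> = AtomicPtr::new(ptr::null_mut());

fn install(p: Payload) {
    // The Payload is fully written before its address is published (Release
    // half); the Acquire half lets us safely reclaim the previous allocation.
    let old = SLOT.swap(Box::into_raw(Box::new(p)), Ordering::AcqRel);
    if !old.is_null() {
        drop(unsafe { Box::from_raw(old) });
    }
}

fn take() -> Option<Box<Payload>> {
    // AcqRel: Acquire pairs with the installer's Release so the contents are
    // visible; Release orders our own prior writes before other threads see
    // the slot emptied.
    let raw = SLOT.swap(ptr::null_mut(), Ordering::AcqRel);
    if raw.is_null() {
        None
    } else {
        Some(unsafe { Box::from_raw(raw) })
    }
}
```

In the diff itself the hook is a bare `fn(Layout)` pointer, so nothing is boxed or freed; the same pairing argument is what the comment relies on for the swap's orderings.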
4 changes: 4 additions & 0 deletions sgx_tstd/hashbrown/benches/bench.rs
@@ -45,6 +45,10 @@ impl Iterator for RandomKeys {

// Just an arbitrary side effect to make the maps not shortcircuit to the non-dropping path
// when dropping maps/entries (most real world usages likely have drop in the key or value)
//
// Note about memory ordering:
// SIDE_EFFECT does not synchronize with any other variable; it is only a
// counter, so `Relaxed` is enough for correctness.
lazy_static::lazy_static! {
static ref SIDE_EFFECT: AtomicUsize = AtomicUsize::new(0);
}
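To illustrate why `Relaxed` is all a pure counter like `SIDE_EFFECT` needs, here is a standalone toy (not the benchmark code): the increments stay atomic, they just carry no ordering guarantees for other memory.

```rust
use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread;

static COUNTER: AtomicUsize = AtomicUsize::new(0);

fn main() {
    let handles: Vec<_> = (0..4)
        .map(|_| {
            thread::spawn(|| {
                for _ in 0..1_000 {
                    // Relaxed: each increment is still atomic, and the
                    // counter does not guard any other data.
                    COUNTER.fetch_add(1, Ordering::Relaxed);
                }
            })
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }
    // No increment can be lost, even though no ordering was requested.
    assert_eq!(COUNTER.load(Ordering::Relaxed), 4_000);
}
```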
11 changes: 8 additions & 3 deletions sgx_tstd/src/alloc.rs
@@ -78,6 +78,11 @@ pub use alloc_crate::alloc::*;

pub use sgx_alloc::System;

/// Note about memory ordering:
///
/// HOOK stores the allocation error handler. The handler's memory must be fully
/// initialized before its address is stored into HOOK, so Release/Acquire
/// ordering is enough.
static HOOK: AtomicPtr<()> = AtomicPtr::new(ptr::null_mut());

/// Registers a custom allocation error hook, replacing any that was previously registered.
@@ -106,7 +111,7 @@ static HOOK: AtomicPtr<()> = AtomicPtr::new(ptr::null_mut());
/// set_alloc_error_hook(custom_alloc_error_hook);
/// ```
pub fn set_alloc_error_hook(hook: fn(Layout)) {
HOOK.store(hook as *mut (), Ordering::SeqCst);
HOOK.store(hook as *mut (), Ordering::Release);
}

/// Unregisters the current allocation error hook, returning it.
@@ -115,7 +120,7 @@ pub fn set_alloc_error_hook(hook: fn(Layout)) {
///
/// If no custom hook is registered, the default hook will be returned.
pub fn take_alloc_error_hook() -> fn(Layout) {
let hook = HOOK.swap(ptr::null_mut(), Ordering::SeqCst);
let hook = HOOK.swap(ptr::null_mut(), Ordering::AcqRel);
if hook.is_null() { default_alloc_error_hook } else { unsafe { mem::transmute(hook) } }
}

@@ -137,7 +142,7 @@ fn default_alloc_error_hook(layout: Layout) {
#[doc(hidden)]
#[alloc_error_handler]
pub fn rust_oom(layout: Layout) -> ! {
let hook = HOOK.load(Ordering::SeqCst);
let hook = HOOK.load(Ordering::Acquire);
let hook: fn(Layout) =
if hook.is_null() { default_alloc_error_hook } else { unsafe { mem::transmute(hook) } };
hook(layout);
7 changes: 5 additions & 2 deletions sgx_tstd/src/panic.rs
@@ -248,6 +248,9 @@ impl BacktraceStyle {
// that backtrace.
//
// Internally stores equivalent of an Option<BacktraceStyle>.
//
// SHOULD_CAPTURE is only a flag shared between threads and does not
// synchronize with any other variable, so `Relaxed` is enough here.
#[cfg(feature = "backtrace")]
static SHOULD_CAPTURE: AtomicUsize = AtomicUsize::new(0);

@@ -258,7 +261,7 @@ static SHOULD_CAPTURE: AtomicUsize = AtomicUsize::new(0);
/// environment variable; see the details in [`get_backtrace_style`].
#[cfg(feature = "backtrace")]
pub fn set_backtrace_style(style: BacktraceStyle) {
SHOULD_CAPTURE.store(style.as_usize(), Ordering::Release);
SHOULD_CAPTURE.store(style.as_usize(), Ordering::Relaxed);
}

/// Checks whether the standard library's panic hook will capture and print a
@@ -284,5 +287,5 @@ pub fn set_backtrace_style(style: BacktraceStyle) {
/// Returns `None` if backtraces aren't currently supported.
#[cfg(feature = "backtrace")]
pub fn get_backtrace_style() -> Option<BacktraceStyle> {
BacktraceStyle::from_usize(SHOULD_CAPTURE.load(Ordering::Acquire))
BacktraceStyle::from_usize(SHOULD_CAPTURE.load(Ordering::Relaxed))
}
6 changes: 5 additions & 1 deletion sgx_tstd/src/panicking.rs
@@ -275,6 +275,10 @@ fn default_hook(info: &PanicInfo<'_>) {

#[cfg(feature = "backtrace")]
{
// Note about memory ordering:
// FIRST_PANIC is only used inside this function; its single read-modify-write
// is the `swap` below, and it does not synchronize with any other variable,
// so `Relaxed` is enough here.
static FIRST_PANIC: AtomicBool = AtomicBool::new(true);

match backtrace {
@@ -285,7 +289,7 @@ fn default_hook(info: &PanicInfo<'_>) {
drop(backtrace::print(err, crate::sys::backtrace::PrintFmt::Full))
}
Some(BacktraceStyle::Off) => {
if FIRST_PANIC.swap(false, Ordering::SeqCst) {
if FIRST_PANIC.swap(false, Ordering::Relaxed) {
let _ = writeln!(
err,
"note: call backtrace::enable_backtrace with 'PrintFormat::Short/Full' for a backtrace."
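The `FIRST_PANIC` flag follows a do-this-once pattern; a minimal isolated sketch (the message text is made up):

```rust
use std::sync::atomic::{AtomicBool, Ordering};

static FIRST_TIME: AtomicBool = AtomicBool::new(true);

fn print_hint_once() {
    // swap is a single atomic read-modify-write, so even with Relaxed
    // ordering at most one caller ever observes `true` here; the flag
    // protects nothing beyond itself, so no stronger ordering is needed.
    if FIRST_TIME.swap(false, Ordering::Relaxed) {
        eprintln!("note: run with backtraces enabled for more detail");
    }
}
```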
12 changes: 9 additions & 3 deletions sgx_tstd/src/sync/mpsc/blocking.rs
@@ -24,6 +24,12 @@ use crate::time::Instant;
#[cfg(not(feature = "untrusted_time"))]
use crate::untrusted::time::InstantEx;

/// Note about memory ordering:
///
/// `woken` synchronizes the signalling thread with the waiting thread, so
/// Acquire/Release is enough. A successful CAS uses AcqRel; a failed CAS
/// does not synchronize with anything, so `Relaxed` is sufficient for the
/// failure ordering.
struct Inner {
thread: Thread,
woken: AtomicBool,
@@ -57,7 +63,7 @@ impl SignalToken {
let wake = self
.inner
.woken
.compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
.compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed)
.is_ok();
if wake {
self.inner.thread.unpark();
@@ -82,14 +88,14 @@

impl WaitToken {
pub fn wait(self) {
while !self.inner.woken.load(Ordering::SeqCst) {
while !self.inner.woken.load(Ordering::Acquire) {
thread::park()
}
}

/// Returns `true` if we wake up normally.
pub fn wait_max_until(self, end: Instant) -> bool {
while !self.inner.woken.load(Ordering::SeqCst) {
while !self.inner.woken.load(Ordering::Acquire) {
let now = Instant::now();
if now >= end {
return false;
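A sketch of the wake-token handshake that the comment in `blocking.rs` describes, reduced to a single flag plus park/unpark (simplified; names are illustrative). Only a successful CAS is followed by an `unpark`, which is why its ordering is `AcqRel`, while the failure ordering can stay `Relaxed`.

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;

fn main() {
    let woken = Arc::new(AtomicBool::new(false));

    let waiter = {
        let woken = Arc::clone(&woken);
        thread::spawn(move || {
            // Acquire pairs with the successful AcqRel CAS in the waker;
            // the loop also absorbs spurious wakeups from park().
            while !woken.load(Ordering::Acquire) {
                thread::park();
            }
        })
    };

    // Only the thread whose CAS succeeds performs the unpark; a failed CAS
    // publishes nothing, so Relaxed is fine for the failure ordering.
    if woken
        .compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed)
        .is_ok()
    {
        waiter.thread().unpark();
    }
    waiter.join().unwrap();
}
```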
33 changes: 20 additions & 13 deletions sgx_tstd/src/sync/mpsc/oneshot.rs
@@ -107,15 +107,18 @@ impl<T> Packet<T> {
assert!((*self.data.get()).is_none());
ptr::write(self.data.get(), Some(t));
ptr::write(self.upgrade.get(), SendUsed);

match self.state.swap(DATA, Ordering::SeqCst) {
// The state word publishes the writes to `data` and `upgrade` above,
// so Release is enough here.
match self.state.swap(DATA, Ordering::Release) {
// Sent the data, no one was waiting
EMPTY => Ok(()),

// Couldn't send the data, the port hung up first. Return the data
// back up the stack.
DISCONNECTED => {
self.state.swap(DISCONNECTED, Ordering::SeqCst);
// The state word needs to synchronize with the write to `upgrade`,
// so Acquire is enough here.
self.state.swap(DISCONNECTED, Ordering::Acquire);
ptr::write(self.upgrade.get(), NothingSent);
Err((&mut *self.data.get()).take().unwrap())
}
@@ -142,12 +145,12 @@ impl<T> Packet<T> {
pub fn recv(&self, deadline: Option<Instant>) -> Result<T, Failure<T>> {
// Attempt to not block the thread (it's a little expensive). If it looks
// like we're not empty, then immediately go through to `try_recv`.
if self.state.load(Ordering::SeqCst) == EMPTY {
if self.state.load(Ordering::Acquire) == EMPTY {
let (wait_token, signal_token) = blocking::tokens();
let ptr = unsafe { signal_token.to_raw() };

// race with senders to enter the blocking state
if self.state.compare_exchange(EMPTY, ptr, Ordering::SeqCst, Ordering::SeqCst).is_ok() {
if self.state.compare_exchange(EMPTY, ptr, Ordering::Release, Ordering::Relaxed).is_ok() {
if let Some(deadline) = deadline {
let timed_out = !wait_token.wait_max_until(deadline);
// Try to reset the state
@@ -169,7 +172,7 @@

pub fn try_recv(&self) -> Result<T, Failure<T>> {
unsafe {
match self.state.load(Ordering::SeqCst) {
match self.state.load(Ordering::Acquire) {
EMPTY => Err(Empty),

// We saw some data on the channel, but the channel can be used
@@ -179,11 +182,13 @@
// the state changes under our feet we'd rather just see that state
// change.
DATA => {
// The state word must synchronize with the sender's write to `data`,
// so Acquire on the successful exchange is enough here.
let _ = self.state.compare_exchange(
DATA,
EMPTY,
Ordering::SeqCst,
Ordering::SeqCst,
Ordering::Acquire,
Ordering::Relaxed,
);
match (&mut *self.data.get()).take() {
Some(data) => Ok(data),
@@ -222,7 +227,9 @@ impl<T> Packet<T> {
};
ptr::write(self.upgrade.get(), GoUp(up));

match self.state.swap(DISCONNECTED, Ordering::SeqCst) {
// The state word must both publish our write to `upgrade` and observe the
// sender's earlier writes, so AcqRel is enough here.
match self.state.swap(DISCONNECTED, Ordering::AcqRel) {
// If the channel is empty or has data on it, then we're good to go.
// Senders will check the data before the upgrade (in case we
// plastered over the DATA state).
@@ -242,7 +249,7 @@ }
}

pub fn drop_chan(&self) {
match self.state.swap(DISCONNECTED, Ordering::SeqCst) {
match self.state.swap(DISCONNECTED, Ordering::AcqRel) {
DATA | DISCONNECTED | EMPTY => {}

// If someone's waiting, we gotta wake them up
@@ -253,7 +260,7 @@ }
}

pub fn drop_port(&self) {
match self.state.swap(DISCONNECTED, Ordering::SeqCst) {
match self.state.swap(DISCONNECTED, Ordering::AcqRel) {
// An empty channel has nothing to do, and a remotely disconnected
// channel also has nothing to do b/c we're about to run the drop
// glue
@@ -280,7 +287,7 @@ impl<T> Packet<T> {
//
// The return value indicates whether there's data on this port.
pub fn abort_selection(&self) -> Result<bool, Receiver<T>> {
let state = match self.state.load(Ordering::SeqCst) {
let state = match self.state.load(Ordering::Acquire) {
// Each of these states means that no further activity will happen
// with regard to abortion selection
s @ (EMPTY | DATA | DISCONNECTED) => s,
@@ -289,7 +296,7 @@
// of it (may fail)
ptr => self
.state
.compare_exchange(ptr, EMPTY, Ordering::SeqCst, Ordering::SeqCst)
.compare_exchange(ptr, EMPTY, Ordering::AcqRel, Ordering::Acquire)
.unwrap_or_else(|x| x),
};

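Finally, the oneshot changes all follow the same principle: the `state` word is what publishes the non-atomic `data` and `upgrade` slots. A stripped-down, single-producer/single-consumer sketch (two states only, hypothetical, much simpler than the real `Packet`):

```rust
use std::cell::UnsafeCell;
use std::sync::atomic::{AtomicUsize, Ordering};

const EMPTY: usize = 0;
const DATA: usize = 1;

// A toy one-shot slot: `state` is the only atomic, and it publishes `data`.
struct Slot<T> {
    state: AtomicUsize,
    data: UnsafeCell<Option<T>>,
}

// Sound only under the single-producer/single-consumer discipline used below.
unsafe impl<T: Send> Sync for Slot<T> {}

impl<T> Slot<T> {
    const fn new() -> Self {
        Slot {
            state: AtomicUsize::new(EMPTY),
            data: UnsafeCell::new(None),
        }
    }

    // Producer: write the payload first, then flip the state with Release so
    // the write to `data` happens-before any Acquire observation of DATA.
    fn send(&self, t: T) {
        unsafe { *self.data.get() = Some(t) };
        self.state.store(DATA, Ordering::Release);
    }

    // Consumer: an Acquire exchange back to EMPTY pairs with the Release
    // above, making the read of `data` safe. A failed exchange saw no
    // payload and synchronizes with nothing, hence Relaxed.
    fn try_recv(&self) -> Option<T> {
        match self
            .state
            .compare_exchange(DATA, EMPTY, Ordering::Acquire, Ordering::Relaxed)
        {
            Ok(_) => unsafe { (*self.data.get()).take() },
            Err(_) => None,
        }
    }
}
```

The real `Packet` has more states (upgrades, disconnection, a parked receiver), which is why several of its transitions use `swap` with `AcqRel`: the state word must both publish this side's writes and acquire the other side's.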