From ba44f2e211d2e2e3c1d67cd936b88824d12b13c1 Mon Sep 17 00:00:00 2001
From: Joshua Nelson
Date: Sun, 25 Jun 2023 21:07:00 +0000
Subject: [PATCH 1/4] Port outline-atomics to rust

This has a very long history, summarized in https://github.com/rust-lang/rust/issues/109064. This port is a very minimal subset of `aarch64/lse.S` from LLVM's compiler-rt. In particular, it is missing the following:

1. Any form of runtime dispatch between LL/SC and LSE. Determining which version of the intrinsics to use requires one of the following:
i) `getauxval` from glibc. It's unclear whether `compiler_builtins` is allowed to depend on libc at all, and musl doesn't even support `getauxval`. Don't enshrine the requirement de facto by making it mandatory for outline-atomics.
ii) kernel support. Linux and FreeBSD have limited support, but it requires an extremely recent kernel version and doesn't work at all under QEMU (https://github.com/rust-lang/rust/issues/109064#issuecomment-1494939904).
Instead, we hard-code LL/SC intrinsics. Users who want LSE support should use the LLVM compiler-rt (if you're building from source in rust-lang/rust, make sure you have `src/llvm-project` checked out locally; the goal is to soon add a new `optimized-compiler-builtins` option so this is easier to discover).

2. The global `___aarch64_have_lse_atomics` CTOR, required to do runtime dispatch. Thom Chiviolani has this to say about global CTORs:

> static ctors are problems because we are pretty eager about dead code elim
> in general, if you have a module that isn't directly referenced, we will probably not have its static ctors
> also, while llvm has a super robust way to have a static ctor (there's a special "appending global" to use for c++), we don't use that and just have people make a #[used] static in a special section
> 1. the robust way kinda requires rust knowing that the argument is a static ctor (maybe a #[rustc_static_ctor] attribute). it also would be... finicky, since on windows we actually care, beyond being a static ctor, that we run as part of a specific group of ctors, which means a very specific section (one for TLS and the other for, uh, i don't remember)
> 2. we still actually have to codegen the cgu that isn't referenced. but maybe we could remember that it has that attribute and use that

So while this is possible in theory, it's decidedly non-trivial and needs invasive changes to rust itself. In any case, it doesn't matter until we decide the story around libc.

3. The 16-byte (i128) version of compare_and_swap. This wouldn't be *too* hard to add, but it would be hard to test. The way I tested the existing code was not just with unit tests but also by loading it as a path dependency and running `x test core`; the latter caught several bugs the unit tests didn't catch (because I originally wrote the tests wrong). So I am slightly nervous about adding a 16-byte version that is much more poorly tested than the other intrinsics.
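For reference, the glibc-based runtime detection this patch deliberately leaves out would look roughly like the sketch below. This is a hypothetical illustration, not code from this patch: the `HAVE_LSE` static, the `init_have_lse_atomics` function, and the use of the `libc` crate are assumptions made for the example. glibc reports LSE support through the `HWCAP_ATOMICS` bit of `AT_HWCAP`:

    // Hypothetical sketch only -- this is the libc dependency the patch avoids.
    use core::sync::atomic::{AtomicBool, Ordering};

    static HAVE_LSE: AtomicBool = AtomicBool::new(false);

    // Would have to run as a global ctor before any outlined atomic is called,
    // which is exactly the #[used]-static problem described in point 2 above.
    fn init_have_lse_atomics() {
        // getauxval(AT_HWCAP) returns a bitmask of CPU features;
        // HWCAP_ATOMICS is set when the CPU implements FEAT_LSE.
        let hwcap = unsafe { libc::getauxval(libc::AT_HWCAP) };
        HAVE_LSE.store((hwcap & libc::HWCAP_ATOMICS) != 0, Ordering::Relaxed);
    }

Each outlined intrinsic would then branch on `HAVE_LSE` to choose between a single LSE instruction and the LL/SC loop; since we skip detection entirely, we always emit the LL/SC loop.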
---
build.rs | 57 ++++++++++-
src/aarch64.rs | 221 +++++++++++++++++++++++++++++++++++++++++
src/lib.rs | 3 +
src/macros.rs | 2 +-
testcrate/tests/lse.rs | 88 ++++++++++++++++
5 files changed, 369 insertions(+), 2 deletions(-)
create mode 100644 src/aarch64.rs
create mode 100644 testcrate/tests/lse.rs

diff --git a/build.rs b/build.rs
index 766dec05..266cc28b 100644
--- a/build.rs
+++ b/build.rs
@@ -1,4 +1,4 @@
-use std::env;
+use std::{collections::HashMap, env, sync::atomic::Ordering};
 fn main() {
     println!("cargo:rerun-if-changed=build.rs");
@@ -90,6 +90,61 @@ fn main() {
     {
         println!("cargo:rustc-cfg=kernel_user_helpers")
     }
+
+    if llvm_target[0] == "aarch64" {
+        generate_aarch64_outlined_atomics();
+    }
+}
+
+fn aarch64_symbol(ordering: Ordering) -> &'static str {
+    match ordering {
+        Ordering::Relaxed => "relax",
+        Ordering::Acquire => "acq",
+        Ordering::Release => "rel",
+        Ordering::AcqRel => "acq_rel",
+        _ => panic!("unknown symbol for {:?}", ordering),
+    }
+}
+
+/// The `concat_idents` macro is extremely annoying and doesn't allow us to define new items.
+/// Define them from the build script instead.
+/// Note that the majority of the code is still defined in `aarch64.rs` through inline macros.
+fn generate_aarch64_outlined_atomics() {
+    use std::fmt::Write;
+    // #[macro_export] so that we can use this in tests
+    let gen_macro =
+        |name| format!("#[macro_export] macro_rules! foreach_{name} {{ ($macro:path) => {{\n");
+
+    // Generate different macros for add/clr/eor/set so that we can test them separately.
+    let sym_names = ["cas", "ldadd", "ldclr", "ldeor", "ldset", "swp"];
+    let mut macros = HashMap::new();
+    for sym in sym_names {
+        macros.insert(sym, gen_macro(sym));
+    }
+
+    for ordering in [
+        Ordering::Relaxed,
+        Ordering::Acquire,
+        Ordering::Release,
+        Ordering::AcqRel,
+    ] {
+        let sym_ordering = aarch64_symbol(ordering);
+        // TODO: support CAS 16
+        for size in [1, 2, 4, 8 /* , 16*/] {
+            for (sym, macro_) in &mut macros {
+                let name = format!("__aarch64_{sym}{size}_{sym_ordering}");
+                writeln!(macro_, "$macro!( {ordering:?}, {size}, {name} );").unwrap();
+            }
+        }
+    }
+
+    let mut buf = String::new();
+    for macro_def in macros.values() {
+        buf += macro_def;
+        buf += "}; }";
+    }
+    let dst = std::env::var("OUT_DIR").unwrap() + "/outlined_atomics.rs";
+    std::fs::write(dst, buf).unwrap();
 }

 #[cfg(feature = "c")]
diff --git a/src/aarch64.rs b/src/aarch64.rs
new file mode 100644
index 00000000..01888065
--- /dev/null
+++ b/src/aarch64.rs
@@ -0,0 +1,221 @@
+//! Aarch64 targets have two possible implementations for atomics:
//! 1. Load-Linked/Store-Conditional (LL/SC), older and slower.
//! 2. Large System Extensions (LSE), newer and faster.
//! To avoid breaking backwards compat, C toolchains introduced a concept of "outlined atomics",
//! where atomic operations call into the compiler runtime to dispatch between the two depending on
//! which is supported on the current CPU.
//! See https://community.arm.com/arm-community-blogs/b/tools-software-ides-blog/posts/making-the-most-of-the-arm-architecture-in-gcc-10#:~:text=out%20of%20line%20atomics for more discussion.
//!
//! Currently we only support LL/SC, because LSE requires `getauxval` from libc in order to do runtime detection.
//! Use the `compiler-rt` intrinsics if you want LSE support.
//!
//! Ported from `aarch64/lse.S` in LLVM's compiler-rt.
//!
//! Generate functions for each of the following symbols:
//! __aarch64_swpN_ORDER
//! __aarch64_ldaddN_ORDER
//! __aarch64_ldclrN_ORDER
//! __aarch64_ldeorN_ORDER
//! __aarch64_ldsetN_ORDER
//! for N = {1, 2, 4, 8}, M = {1, 2, 4, 8}, ORDER = { relax, acq, rel, acq_rel }
//!
//! TODO: M = 16
//!
//! The original `lse.S` has some truly horrifying code that expects to be compiled multiple times with different constants.
//! We do something similar, but with macro arguments.

/// We don't do runtime dispatch so we don't have to worry about the global ctor.
/// Apparently MacOS uses a different number of underscores in the symbol name (???)
// #[cfg(target_vendor = "apple")]
// macro_rules! have_lse {
//     () => { ___aarch64_have_lse_atomics }
// }

// #[cfg(not(target_vendor = "apple"))]
// macro_rules! have_lse {
//     () => { __aarch64_have_lse_atomics }
// }

/// Translate a byte size to a Rust type.
macro_rules! int_ty {
    (1) => { i8 };
    (2) => { i16 };
    (4) => { i32 };
    (8) => { i64 };
    (16) => { i128 };
}

/// Given a byte size and a register number, return a register of the appropriate size.
///
/// See .
macro_rules! reg {
    (1, $num:literal) => { concat!("w", $num) };
    (2, $num:literal) => { concat!("w", $num) };
    (4, $num:literal) => { concat!("w", $num) };
    (8, $num:literal) => { concat!("x", $num) };
}

/// Given an atomic ordering, translate it to the acquire suffix for the ldxr aarch64 ASM instruction.
macro_rules! acquire {
    (Relaxed) => { "" };
    (Acquire) => { "a" };
    (Release) => { "" };
    (AcqRel) => { "a" };
}

/// Given an atomic ordering, translate it to the release suffix for the stxr aarch64 ASM instruction.
macro_rules! release {
    (Relaxed) => { "" };
    (Acquire) => { "" };
    (Release) => { "l" };
    (AcqRel) => { "l" };
}

/// Given a size in bytes, translate it to the byte suffix for an aarch64 ASM instruction.
macro_rules! size {
    (1) => { "b" };
    (2) => { "h" };
    (4) => { "" };
    (8) => { "" };
    (16) => { "" };
}

/// Given a byte size, translate it to an Unsigned eXTend instruction
/// with the correct semantics.
///
/// See
macro_rules! uxt {
    (1) => { "uxtb" };
    (2) => { "uxth" };
    ($_:tt) => { "mov" };
}

/// Given an atomic ordering and byte size, translate it to a LoaD eXclusive Register instruction
/// with the correct semantics.
///
/// See .
macro_rules! ldxr {
    ($ordering:ident, $bytes:tt) => { concat!("ld", acquire!($ordering), "xr", size!($bytes)) }
}

/// Given an atomic ordering and byte size, translate it to a STore eXclusive Register instruction
/// with the correct semantics.
///
/// See .
macro_rules! stxr {
    ($ordering:ident, $bytes:tt) => { concat!("st", release!($ordering), "xr", size!($bytes)) }
}

/// See .
macro_rules! compare_and_swap {
    ($ordering:ident, $bytes:tt, $name:ident) => {
        intrinsics! {
            #[maybe_use_optimized_c_shim]
            #[naked]
            pub extern "C" fn $name (
                expected: int_ty!($bytes), desired: int_ty!($bytes), ptr: *mut int_ty!($bytes)
            ) -> int_ty!($bytes) {
                // We can't use `AtomicI8::compare_and_swap`; we *are* compare_and_swap.
                unsafe { core::arch::asm! {
                    // UXT s(tmp0), s(0)
                    concat!(uxt!($bytes), " ", reg!($bytes, 16), ", ", reg!($bytes, 0)),
                    "0:",
                    // LDXR s(0), [x2]
                    concat!(ldxr!($ordering, $bytes), " ", reg!($bytes, 0), ", [x2]"),
                    // cmp s(0), s(tmp0)
                    concat!("cmp ", reg!($bytes, 0), ", ", reg!($bytes, 16)),
                    "bne 1f",
                    // STXR w(tmp1), s(1), [x2]
                    concat!(stxr!($ordering, $bytes), " w17, ", reg!($bytes, 1), ", [x2]"),
                    "cbnz w17, 0b",
                    "1:",
                    "ret",
                    options(noreturn)
                } }
            }
        }
    }
}


/// See .
macro_rules! swap {
    ($ordering:ident, $bytes:tt, $name:ident) => {
        intrinsics! {
            #[maybe_use_optimized_c_shim]
            #[naked]
            pub extern "C" fn $name (
                left: int_ty!($bytes), right_ptr: *mut int_ty!($bytes)
            ) -> int_ty!($bytes) {
                unsafe { core::arch::asm! {
                    // mov s(tmp0), s(0)
                    concat!("mov ", reg!($bytes, 16), ", ", reg!($bytes, 0)),
                    "0:",
                    // LDXR s(0), [x1]
                    concat!(ldxr!($ordering, $bytes), " ", reg!($bytes, 0), ", [x1]"),
                    // STXR w(tmp1), s(tmp0), [x1]
                    concat!(stxr!($ordering, $bytes), " w17, ", reg!($bytes, 16), ", [x1]"),
                    "cbnz w17, 0b",
                    "ret",
                    options(noreturn)
                } }
            }
        }
    }
}

/// See (e.g.) .
macro_rules! fetch_op {
    ($ordering:ident, $bytes:tt, $name:ident, $op:literal) => {
        intrinsics! {
            #[maybe_use_optimized_c_shim]
            #[naked]
            pub extern "C" fn $name (
                val: int_ty!($bytes), ptr: *mut int_ty!($bytes)
            ) -> int_ty!($bytes) {
                unsafe { core::arch::asm! {
                    // mov s(tmp0), s(0)
                    concat!("mov ", reg!($bytes, 16), ", ", reg!($bytes, 0)),
                    "0:",
                    // LDXR s(0), [x1]
                    concat!(ldxr!($ordering, $bytes), " ", reg!($bytes, 0), ", [x1]"),
                    // OP s(tmp1), s(0), s(tmp0)
                    concat!($op, " ", reg!($bytes, 17), ", ", reg!($bytes, 0), ", ", reg!($bytes, 16)),
                    // STXR w(tmp2), s(tmp1), [x1]
                    concat!(stxr!($ordering, $bytes), " w15, ", reg!($bytes, 17), ", [x1]"),
                    "cbnz w15, 0b",
                    "ret",
                    options(noreturn)
                } }
            }
        }
    }
}

// We need a single macro to pass to `foreach_ldadd`.
macro_rules! add {
    ($ordering:ident, $bytes:tt, $name:ident) => { fetch_op! { $ordering, $bytes, $name, "add" } }
}

macro_rules! and {
    ($ordering:ident, $bytes:tt, $name:ident) => { fetch_op! { $ordering, $bytes, $name, "bic" } }
}

macro_rules! xor {
    ($ordering:ident, $bytes:tt, $name:ident) => { fetch_op! { $ordering, $bytes, $name, "eor" } }
}

macro_rules! or {
    ($ordering:ident, $bytes:tt, $name:ident) => { fetch_op! { $ordering, $bytes, $name, "orr" } }
}

// See `generate_aarch64_outlined_atomics` in build.rs.
include!(concat!(env!("OUT_DIR"), "/outlined_atomics.rs"));
foreach_cas!(compare_and_swap);
foreach_swp!(swap);
foreach_ldadd!(add);
foreach_ldclr!(and);
foreach_ldeor!(xor);
foreach_ldset!(or);

// TODO: CAS 16

diff --git a/src/lib.rs b/src/lib.rs
index 71f249c8..90b21f1f 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -57,6 +57,9 @@ pub mod mem;
 #[cfg(target_arch = "arm")]
 pub mod arm;

+#[cfg(target_arch = "aarch64")]
+pub mod aarch64;
+
 #[cfg(all(
     kernel_user_helpers,
     any(target_os = "linux", target_os = "android"),

diff --git a/src/macros.rs b/src/macros.rs
index b3becde7..e3a38192 100644
--- a/src/macros.rs
+++ b/src/macros.rs
@@ -419,7 +419,7 @@ macro_rules! intrinsics {
     (
         #[naked]
         $(#[$($attr:tt)*])*
-        pub unsafe extern $abi:tt fn $name:ident( $($argname:ident: $ty:ty),* ) $(-> $ret:ty)? {
+        pub $(unsafe)? extern $abi:tt fn $name:ident( $($argname:ident: $ty:ty),* ) $(-> $ret:ty)? {
             $($body:tt)*
         }

diff --git a/testcrate/tests/lse.rs b/testcrate/tests/lse.rs
new file mode 100644
index 00000000..49d73177
--- /dev/null
+++ b/testcrate/tests/lse.rs
@@ -0,0 +1,88 @@
+#![cfg(target_arch = "aarch64")]
+#![feature(decl_macro)] // so we can use pub(super)

/// Translate a byte size to a Rust type.
+macro int_ty {
    (1) => { i8 },
    (2) => { i16 },
    (4) => { i32 },
    (8) => { i64 },
    (16) => { i128 }
}

mod cas {
    pub(super) macro test($_ordering:ident, $bytes:tt, $name:ident) {
        #[test]
        fn $name() {
            testcrate::fuzz_2(10000, |expected: super::int_ty!($bytes), new| {
                let mut target = expected.wrapping_add(10);
                assert_eq!(
                    unsafe { compiler_builtins::aarch64::$name::$name(expected, new, &mut target) },
                    expected.wrapping_add(10),
                    "return value should always be the previous value",
                );
                assert_eq!(
                    target,
                    expected.wrapping_add(10),
                    "shouldn't have changed target"
                );

                target = expected;
                assert_eq!(
                    unsafe { compiler_builtins::aarch64::$name::$name(expected, new, &mut target) },
                    expected
                );
                assert_eq!(target, new, "should have updated target");
            });
        }
    }
}

mod swap {
    pub(super) macro test($_ordering:ident, $bytes:tt, $name:ident) {
        #[test]
        fn $name() {
            testcrate::fuzz_2(10000, |left: super::int_ty!($bytes), mut right| {
                let orig_right = right;
                assert_eq!(
                    unsafe { compiler_builtins::aarch64::$name::$name(left, &mut right) },
                    orig_right
                );
                assert_eq!(left, right);
            });
        }
    }
}

macro_rules! test_op {
    ($mod:ident, $( $op:tt )* ) => {
        mod $mod {
            pub(super) macro test {
                ($_ordering:ident, $bytes:tt, $name:ident) => {
                    #[test]
                    fn $name() {
                        testcrate::fuzz_2(10000, |old, val| {
                            let mut target = old;
                            let op: fn(super::int_ty!($bytes), super::int_ty!($bytes)) -> _ = $($op)*;
                            let expected = op(old, val);
                            assert_eq!(old, unsafe { compiler_builtins::aarch64::$name::$name(val, &mut target) }, "{} should return original value", stringify!($name));
                            assert_eq!(expected, target, "{} should store to target", stringify!($name));
                        });
                    }
                }
            }
        }
    };
}

test_op!(add, |left, right| left.wrapping_add(right));
test_op!(clr, |left, right| left & !right);
test_op!(xor, std::ops::BitXor::bitxor);
test_op!(or, std::ops::BitOr::bitor);

compiler_builtins::foreach_cas!(cas::test);
compiler_builtins::foreach_swp!(swap::test);
compiler_builtins::foreach_ldadd!(add::test);
compiler_builtins::foreach_ldclr!(clr::test);
compiler_builtins::foreach_ldeor!(xor::test);
compiler_builtins::foreach_ldset!(or::test);

From 31ee4544dbe47903ce771270d6e3bea8654e9e50 Mon Sep 17 00:00:00 2001
From: Joshua Nelson
Date: Mon, 26 Jun 2023 13:54:47 +0000
Subject: [PATCH 2/4] address review comments and fix CI

- implement CAS 16
- remove useless commented out symbol name
- support `feature("no-asm")`
- fix warnings when `feature("c")` is enabled
- rustfmt

---
build.rs | 10 ++--
src/aarch64.rs | 103 +++++++++++++++++++++++++++----------
src/lib.rs | 6 ++-
testcrate/tests/lse.rs | 7 ++-
4 files changed, 97 insertions(+), 29 deletions(-)

diff --git a/build.rs b/build.rs
index 266cc28b..4549d0b4 100644
--- a/build.rs
+++ b/build.rs
@@ -122,6 +122,9 @@ fn generate_aarch64_outlined_atomics() {
         macros.insert(sym, gen_macro(sym));
     }

+    // Only CAS supports 16 bytes, and it has a different implementation that uses a different macro.
+    let mut cas16 = gen_macro("cas16");
+
     for ordering in [
         Ordering::Relaxed,
         Ordering::Acquire,
@@ -129,17 +132,18 @@
         Ordering::AcqRel,
     ] {
         let sym_ordering = aarch64_symbol(ordering);
-        // TODO: support CAS 16
-        for size in [1, 2, 4, 8 /* , 16*/] {
+        for size in [1, 2, 4, 8] {
             for (sym, macro_) in &mut macros {
                 let name = format!("__aarch64_{sym}{size}_{sym_ordering}");
                 writeln!(macro_, "$macro!( {ordering:?}, {size}, {name} );").unwrap();
             }
         }
+        let name = format!("__aarch64_cas16_{sym_ordering}");
+        writeln!(cas16, "$macro!( {ordering:?}, {name} );").unwrap();
     }

     let mut buf = String::new();
-    for macro_def in macros.values() {
+    for macro_def in macros.values().chain(std::iter::once(&cas16)) {
         buf += macro_def;
         buf += "}; }";
     }

diff --git a/src/aarch64.rs b/src/aarch64.rs
index 01888065..1aaa1a69 100644
--- a/src/aarch64.rs
+++ b/src/aarch64.rs
@@ -12,31 +12,21 @@
 //! Ported from `aarch64/lse.S` in LLVM's compiler-rt.
 //!
 //! Generate functions for each of the following symbols:
+//! __aarch64_casM_ORDER
 //! __aarch64_swpN_ORDER
 //! __aarch64_ldaddN_ORDER
 //! __aarch64_ldclrN_ORDER
 //! __aarch64_ldeorN_ORDER
 //! __aarch64_ldsetN_ORDER
-//! for N = {1, 2, 4, 8}, M = {1, 2, 4, 8}, ORDER = { relax, acq, rel, acq_rel }
-//!
-//! TODO: M = 16
+//! for N = {1, 2, 4, 8}, M = {1, 2, 4, 8, 16}, ORDER = { relax, acq, rel, acq_rel }
 //!
 //! The original `lse.S` has some truly horrifying code that expects to be compiled multiple times with different constants.
 //! We do something similar, but with macro arguments.
-/// We don't do runtime dispatch so we don't have to worry about the global ctor.
-/// Apparently MacOS uses a different number of underscores in the symbol name (???)
-// #[cfg(target_vendor = "apple")]
-// macro_rules! have_lse {
-//     () => { ___aarch64_have_lse_atomics }
-// }
-
-// #[cfg(not(target_vendor = "apple"))]
-// macro_rules! have_lse {
-//     () => { __aarch64_have_lse_atomics }
-// }
+// We don't do runtime dispatch so we don't have to worry about the `__aarch64_have_lse_atomics` global ctor.

 /// Translate a byte size to a Rust type.
+#[rustfmt::skip]
 macro_rules! int_ty {
     (1) => { i8 };
     (2) => { i16 };
@@ -48,6 +38,7 @@ macro_rules! int_ty {
 /// Given a byte size and a register number, return a register of the appropriate size.
 ///
 /// See .
+#[rustfmt::skip]
 macro_rules! reg {
     (1, $num:literal) => { concat!("w", $num) };
     (2, $num:literal) => { concat!("w", $num) };
@@ -56,6 +47,7 @@ macro_rules! reg {
 }

 /// Given an atomic ordering, translate it to the acquire suffix for the ldxr aarch64 ASM instruction.
+#[rustfmt::skip]
 macro_rules! acquire {
     (Relaxed) => { "" };
     (Acquire) => { "a" };
@@ -64,6 +56,7 @@ macro_rules! acquire {
 }

 /// Given an atomic ordering, translate it to the release suffix for the stxr aarch64 ASM instruction.
+#[rustfmt::skip]
 macro_rules! release {
     (Relaxed) => { "" };
     (Acquire) => { "" };
@@ -72,6 +65,7 @@ macro_rules! release {
 }

 /// Given a size in bytes, translate it to the byte suffix for an aarch64 ASM instruction.
+#[rustfmt::skip]
 macro_rules! size {
     (1) => { "b" };
     (2) => { "h" };
@@ -84,6 +78,7 @@ macro_rules! size {
 /// with the correct semantics.
 ///
 /// See
+#[rustfmt::skip]
 macro_rules! uxt {
     (1) => { "uxtb" };
     (2) => { "uxth" };
@@ -95,7 +90,9 @@ macro_rules! uxt {
 ///
 /// See .
 macro_rules! ldxr {
-    ($ordering:ident, $bytes:tt) => { concat!("ld", acquire!($ordering), "xr", size!($bytes)) }
+    ($ordering:ident, $bytes:tt) => {
+        concat!("ld", acquire!($ordering), "xr", size!($bytes))
+    };
 }

 /// Given an atomic ordering and byte size, translate it to a STore eXclusive Register instruction
@@ -103,7 +100,29 @@ macro_rules! ldxr {
 ///
 /// See .
 macro_rules! stxr {
-    ($ordering:ident, $bytes:tt) => { concat!("st", release!($ordering), "xr", size!($bytes)) }
+    ($ordering:ident, $bytes:tt) => {
+        concat!("st", release!($ordering), "xr", size!($bytes))
+    };
+}
+
+/// Given an atomic ordering, translate it to a LoaD eXclusive Pair of registers instruction
+/// with the correct semantics.
+///
+/// See
+macro_rules! ldxp {
+    ($ordering:ident) => {
+        concat!("ld", acquire!($ordering), "xp")
+    };
+}
+
+/// Given an atomic ordering, translate it to a STore eXclusive Pair of registers instruction
+/// with the correct semantics.
+///
+/// See .
+macro_rules! stxp {
+    ($ordering:ident) => {
+        concat!("st", release!($ordering), "xp")
+    };
 }

 /// See .
@@ -134,9 +153,38 @@ macro_rules! compare_and_swap {
 } } } }
-    }
+    };
 }

+// i128 uses a completely different impl, so it has its own macro.
+macro_rules! compare_and_swap_i128 {
+    ($ordering:ident, $name:ident) => {
+        intrinsics! {
+            #[maybe_use_optimized_c_shim]
+            #[naked]
+            pub extern "C" fn $name (
+                expected: i128, desired: i128, ptr: *mut i128
+            ) -> i128 {
+                unsafe { core::arch::asm! {
+                    "mov x16, x0",
+                    "mov x17, x1",
+                    "0:",
+                    // LDXP x0, x1, [x4]
+                    concat!(ldxp!($ordering), " x0, x1, [x4]"),
+                    "cmp x0, x16",
+                    "ccmp x1, x17, #0, eq",
+                    "bne 1f",
+                    // STXP w(tmp2), x2, x3, [x4]
+                    concat!(stxp!($ordering), " w15, x2, x3, [x4]"),
+                    "cbnz w15, 0b",
+                    "1:",
+                    "ret",
+                    options(noreturn)
+                } }
+            }
+        }
+    };
+}

 /// See .
 macro_rules! swap {
@@ -161,7 +209,7 @@
 } } } }
-    }
+    };
 }

 /// See (e.g.) .
@@ -194,28 +242,35 @@
 // We need a single macro to pass to `foreach_ldadd`.
 macro_rules! add {
-    ($ordering:ident, $bytes:tt, $name:ident) => { fetch_op! { $ordering, $bytes, $name, "add" } }
+    ($ordering:ident, $bytes:tt, $name:ident) => {
+        fetch_op! { $ordering, $bytes, $name, "add" }
+    };
 }

 macro_rules! and {
-    ($ordering:ident, $bytes:tt, $name:ident) => { fetch_op! { $ordering, $bytes, $name, "bic" } }
+    ($ordering:ident, $bytes:tt, $name:ident) => {
+        fetch_op! { $ordering, $bytes, $name, "bic" }
+    };
 }

 macro_rules! xor {
-    ($ordering:ident, $bytes:tt, $name:ident) => { fetch_op! { $ordering, $bytes, $name, "eor" } }
+    ($ordering:ident, $bytes:tt, $name:ident) => {
+        fetch_op! { $ordering, $bytes, $name, "eor" }
+    };
 }

 macro_rules! or {
-    ($ordering:ident, $bytes:tt, $name:ident) => { fetch_op! { $ordering, $bytes, $name, "orr" } }
+    ($ordering:ident, $bytes:tt, $name:ident) => {
+        fetch_op! { $ordering, $bytes, $name, "orr" }
+    };
 }

 // See `generate_aarch64_outlined_atomics` in build.rs.
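 // For reference, here is an illustrative expansion (derived from the `writeln!` format
 // string in build.rs, not copied from generated output): `foreach_swp!(swap)` expands to
 //     swap!( Relaxed, 1, __aarch64_swp1_relax );
 //     swap!( Acquire, 1, __aarch64_swp1_acq );
 //     ...
 // and so on for sizes 2, 4, 8 and orderings rel/acq_rel, so each macro passed below is
 // instantiated once per (ordering, size) pair.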
 include!(concat!(env!("OUT_DIR"), "/outlined_atomics.rs"));
 foreach_cas!(compare_and_swap);
+foreach_cas16!(compare_and_swap_i128);
 foreach_swp!(swap);
 foreach_ldadd!(add);
 foreach_ldclr!(and);
 foreach_ldeor!(xor);
 foreach_ldset!(or);
-
-// TODO: CAS 16

diff --git a/src/lib.rs b/src/lib.rs
index 90b21f1f..4b44adc2 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -57,7 +57,11 @@ pub mod mem;
 #[cfg(target_arch = "arm")]
 pub mod arm;

-#[cfg(target_arch = "aarch64")]
+#[cfg(all(
+    target_arch = "aarch64",
+    not(feature = "no-asm"),
+    not(feature = "optimized-c")
+))]
 pub mod aarch64;

 #[cfg(all(

diff --git a/testcrate/tests/lse.rs b/testcrate/tests/lse.rs
index 49d73177..7b54ab5d 100644
--- a/testcrate/tests/lse.rs
+++ b/testcrate/tests/lse.rs
@@ -1,5 +1,5 @@
-#![cfg(target_arch = "aarch64")]
 #![feature(decl_macro)] // so we can use pub(super)
+#![cfg(all(target_arch = "aarch64", not(feature = "no-asm")))]

 /// Translate a byte size to a Rust type.
 macro int_ty {
@@ -38,6 +38,10 @@ mod cas {
     }
 }

+macro test_cas16($_ordering:ident, $name:ident) {
+    cas::test!($_ordering, 16, $name);
+}
+
 mod swap {
     pub(super) macro test($_ordering:ident, $bytes:tt, $name:ident) {
         #[test]
@@ -81,6 +85,7 @@
 test_op!(xor, std::ops::BitXor::bitxor);
 test_op!(or, std::ops::BitOr::bitor);

 compiler_builtins::foreach_cas!(cas::test);
+compiler_builtins::foreach_cas16!(test_cas16);
 compiler_builtins::foreach_swp!(swap::test);
 compiler_builtins::foreach_ldadd!(add::test);
 compiler_builtins::foreach_ldclr!(clr::test);

From 07cf3b4f109de98910a443c3cacce779eba54cf0 Mon Sep 17 00:00:00 2001
From: Joshua Nelson
Date: Mon, 26 Jun 2023 16:27:16 +0000
Subject: [PATCH 3/4] require naked functions to be unsafe again

they dereference raw pointers, so the caller needs to make sure the pointer is valid. note that this requires changing `maybe_use_optimized_c_shim` to support unsafe functions.

---
src/aarch64.rs | 8 ++++----
src/macros.rs | 8 ++++----
2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/src/aarch64.rs b/src/aarch64.rs
index 1aaa1a69..ddbec6d3 100644
--- a/src/aarch64.rs
+++ b/src/aarch64.rs
@@ -131,7 +131,7 @@ macro_rules! compare_and_swap {
     intrinsics! {
         #[maybe_use_optimized_c_shim]
         #[naked]
-        pub extern "C" fn $name (
+        pub unsafe extern "C" fn $name (
             expected: int_ty!($bytes), desired: int_ty!($bytes), ptr: *mut int_ty!($bytes)
         ) -> int_ty!($bytes) {
             // We can't use `AtomicI8::compare_and_swap`; we *are* compare_and_swap.
@@ -162,7 +162,7 @@ macro_rules! compare_and_swap_i128 {
     intrinsics! {
         #[maybe_use_optimized_c_shim]
         #[naked]
-        pub extern "C" fn $name (
+        pub unsafe extern "C" fn $name (
             expected: i128, desired: i128, ptr: *mut i128
         ) -> i128 {
             unsafe { core::arch::asm! {
@@ -192,7 +192,7 @@ macro_rules! swap {
     intrinsics! {
         #[maybe_use_optimized_c_shim]
         #[naked]
-        pub extern "C" fn $name (
+        pub unsafe extern "C" fn $name (
             left: int_ty!($bytes), right_ptr: *mut int_ty!($bytes)
         ) -> int_ty!($bytes) {
             unsafe { core::arch::asm! {
@@ -218,7 +218,7 @@ macro_rules! fetch_op {
     intrinsics! {
         #[maybe_use_optimized_c_shim]
         #[naked]
-        pub extern "C" fn $name (
+        pub unsafe extern "C" fn $name (
             val: int_ty!($bytes), ptr: *mut int_ty!($bytes)
         ) -> int_ty!($bytes) {
             unsafe { core::arch::asm! {

diff --git a/src/macros.rs b/src/macros.rs
index e3a38192..b11114f1 100644
--- a/src/macros.rs
+++ b/src/macros.rs
@@ -204,7 +204,7 @@ macro_rules! intrinsics {
     (
         #[maybe_use_optimized_c_shim]
         $(#[$($attr:tt)*])*
-        pub extern $abi:tt fn $name:ident( $($argname:ident: $ty:ty),* ) $(-> $ret:ty)? {
+        pub $(unsafe $(@ $empty:tt)? )? extern $abi:tt fn $name:ident( $($argname:ident: $ty:ty),* ) $(-> $ret:ty)? {
             $($body:tt)*
         }

@@ -212,7 +212,7 @@ macro_rules! intrinsics {
     ) => (
         #[cfg($name = "optimized-c")]
         #[cfg_attr(feature = "weak-intrinsics", linkage = "weak")]
-        pub extern $abi fn $name( $($argname: $ty),* ) $(-> $ret)? {
+        pub $(unsafe $($empty)? )? extern $abi fn $name( $($argname: $ty),* ) $(-> $ret)? {
             extern $abi {
                 fn $name($($argname: $ty),*) $(-> $ret)?;
             }
@@ -224,7 +224,7 @@ macro_rules! intrinsics {
         #[cfg(not($name = "optimized-c"))]
         intrinsics! {
             $(#[$($attr)*])*
-            pub extern $abi fn $name( $($argname: $ty),* ) $(-> $ret)? {
+            pub $(unsafe $($empty)? )? extern $abi fn $name( $($argname: $ty),* ) $(-> $ret)? {
                 $($body)*
             }
         }
@@ -419,7 +419,7 @@ macro_rules! intrinsics {
     (
         #[naked]
         $(#[$($attr:tt)*])*
-        pub $(unsafe)? extern $abi:tt fn $name:ident( $($argname:ident: $ty:ty),* ) $(-> $ret:ty)? {
+        pub unsafe extern $abi:tt fn $name:ident( $($argname:ident: $ty:ty),* ) $(-> $ret:ty)? {
             $($body:tt)*
         }

From cac12e8184f94e5a3ae7cdb247c70e3578b56caa Mon Sep 17 00:00:00 2001
From: Joshua Nelson
Date: Tue, 27 Jun 2023 02:31:42 +0000
Subject: [PATCH 4/4] fix tests to work with `--features c`

---
src/aarch64.rs | 1 +
src/lib.rs | 6 +-----
2 files changed, 2 insertions(+), 5 deletions(-)

diff --git a/src/aarch64.rs b/src/aarch64.rs
index ddbec6d3..62144e53 100644
--- a/src/aarch64.rs
+++ b/src/aarch64.rs
@@ -22,6 +22,7 @@
 //!
 //! The original `lse.S` has some truly horrifying code that expects to be compiled multiple times with different constants.
 //! We do something similar, but with macro arguments.
+#![cfg_attr(feature = "c", allow(unused_macros))] // avoid putting the macros into a submodule

 // We don't do runtime dispatch so we don't have to worry about the `__aarch64_have_lse_atomics` global ctor.

diff --git a/src/lib.rs b/src/lib.rs
index 4b44adc2..2e5c587f 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -57,11 +57,7 @@ pub mod mem;
 #[cfg(target_arch = "arm")]
 pub mod arm;

-#[cfg(all(
-    target_arch = "aarch64",
-    not(feature = "no-asm"),
-    not(feature = "optimized-c")
-))]
+#[cfg(all(target_arch = "aarch64", not(feature = "no-asm"),))]
 pub mod aarch64;

 #[cfg(all(