diff --git a/ext/gcc/atomic b/ext/gcc/atomic
new file mode 100644
index 0000000000..fddc75ca81
--- /dev/null
+++ b/ext/gcc/atomic
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2024, Niklas Hauser
+ *
+ * This file is part of the modm project.
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ */
+// ----------------------------------------------------------------------------
+
+#include_next <atomic>
+
+#include <modm_atomic.hpp>
diff --git a/ext/gcc/atomic.cpp.in b/ext/gcc/atomic.cpp.in
new file mode 100644
index 0000000000..b9c0408c0e
--- /dev/null
+++ b/ext/gcc/atomic.cpp.in
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2020, 2024, Niklas Hauser
+ *
+ * This file is part of the modm project.
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ */
+// ----------------------------------------------------------------------------
+
+#include <modm_atomic.hpp>
+
+/* We are implementing the library interface described here:
+ * See https://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary
+ *
+ * We ignore the memory order, since the runtime switching takes longer than
+ * the DMB instruction.
+ */
+
+// ============================ atomics for arrays ============================
+// These functions cannot be inlined, since the compiler builtins are named the
+// same. Terrible design really.
+extern "C" void
+__atomic_load(unsigned int size, const volatile void *src, void *dest, int /*memorder*/)
+{
+	__modm_atomic_pre_barrier(__ATOMIC_SEQ_CST);
+	{
+		modm::atomic::Lock _;
+		__builtin_memcpy(dest, (const void*)src, size);
+	}
+	__modm_atomic_post_barrier(__ATOMIC_SEQ_CST);
+}
+
+extern "C" void
+__atomic_store(unsigned int size, volatile void *dest, void *src, int /*memorder*/)
+{
+	__modm_atomic_pre_barrier(__ATOMIC_SEQ_CST);
+	{
+		modm::atomic::Lock _;
+		__builtin_memcpy((void*)dest, src, size);
+	}
+	__modm_atomic_post_barrier(__ATOMIC_SEQ_CST);
+}
+
+extern "C" void
+__atomic_exchange(unsigned int size, volatile void *ptr, void *val, void *ret, int /*memorder*/)
+{
+	__modm_atomic_pre_barrier(__ATOMIC_SEQ_CST);
+	{
+		modm::atomic::Lock _;
+		__builtin_memcpy(ret, (void*)ptr, size);
+		__builtin_memcpy((void*)ptr, val, size);
+	}
+	__modm_atomic_post_barrier(__ATOMIC_SEQ_CST);
+}
+
+extern "C" bool
+__atomic_compare_exchange(unsigned int len, volatile void *ptr, void *expected, void *desired,
+                          int /*success_memorder*/, int /*failure_memorder*/)
+{
+	bool retval{false};
+	__modm_atomic_pre_barrier(__ATOMIC_SEQ_CST);
+	{
+		modm::atomic::Lock _;
+		if (__builtin_memcmp((void*)ptr, expected, len) == 0) [[likely]]
+		{
+			__builtin_memcpy((void*)ptr, desired, len);
+			retval = true;
+		}
+		else __builtin_memcpy(expected, (void*)ptr, len);
+	}
+	__modm_atomic_post_barrier(__ATOMIC_SEQ_CST);
+	return retval;
+}
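These four generic functions are what GCC falls back to when an atomic object has no matching fixed-size builtin variant. A minimal sketch of code that triggers them (the `Tuple` type is made up for illustration): on a Cortex-M0, the load below lowers to a call of the generic `__atomic_load()` above rather than to inline instructions.

```cpp
#include <atomic>
#include <stdint.h>

// 12 bytes: there is no __atomic_load_12 builtin, so GCC emits a call to the
// generic __atomic_load(), which memcpys the object under the interrupt lock.
struct Tuple { uint32_t a, b, c; };

std::atomic<Tuple> shared{};

Tuple read()
{
	// -> __atomic_load(sizeof(Tuple), &shared, &result, __ATOMIC_SEQ_CST)
	return shared.load();
}
```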
+
+%% macro atomic_fetch(len)
+	%% for name, op in [("add", "+"), ("sub", "-")]
+extern "C" {{len|u}}
+__atomic_fetch_{{name}}_{{len//8}}(volatile void *ptr, {{len|u}} value, int /*memorder*/)
+{
+	{{len|u}} previous{};
+	__modm_atomic_pre_barrier(__ATOMIC_SEQ_CST);
+	{
+		modm::atomic::Lock _;
+		previous = *reinterpret_cast<volatile {{len|u}}*>(ptr);
+		*reinterpret_cast<volatile {{len|u}}*>(ptr) = (previous {{op}} value);
+	}
+	__modm_atomic_post_barrier(__ATOMIC_SEQ_CST);
+	return previous;
+}
+	%% endfor
+%% endmacro
+
+%% for length in bit_lengths
+// ========================= atomics for {{length}} bit integers =========================
+// These functions cannot be inlined since the compiler refuses to find these
+// functions even if they are declared right at the call site. Unclear why.
+{{ atomic_fetch(length) }}
+%% endfor
diff --git a/ext/gcc/atomics_c11_cortex.cpp.in b/ext/gcc/atomics_c11_cortex.cpp.in
deleted file mode 100644
index 63546c329f..0000000000
--- a/ext/gcc/atomics_c11_cortex.cpp.in
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Copyright (c) 2020, Niklas Hauser
- *
- * This file is part of the modm project.
- *
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/.
- */
-// ----------------------------------------------------------------------------
-
-
-#include <modm/architecture/interface/atomic_lock.hpp>
-#include <cstring>
-
-/* Cortex-M0 does not have hardware support for true atomics, like STREX/LDREX.
- * The toolchain does not implement the intrinsics, instead linking to them, so
- * that an external library can implement them as they wish.
- * Here we wrap all operations into an atomic lock, which globally disables
- * interrupts. This isn't high performance, but we have no other choice here.
- *
- * See https://gcc.gnu.org/onlinedocs/gcc/_005f_005fatomic-Builtins.html
- */
-
-using _a8 = uint8_t;
-using _a16 = uint16_t;
-using _a32 = unsigned int;
-using _a64 = uint64_t;
-
-// =========================== atomics for >64 bits ===========================
-%% macro atomic_load(len)
-extern "C" _a{{len}}
-__atomic_load_{{len//8}}(const volatile void *ptr, int /*memorder*/)
-{
-	modm::atomic::Lock _;
-	return *reinterpret_cast<const volatile _a{{len}}*>(ptr);
-}
-%% endmacro
-
-extern "C" void
-__atomic_load_c(size_t size, const void *src, void *dest, int /*memorder*/)
-{
-	modm::atomic::Lock _;
-	std::memcpy(dest, src, size);
-}
-
-
-%% macro atomic_store(len)
-extern "C" void
-__atomic_store_{{len//8}}(volatile void *ptr, _a{{len}} value, int /*memorder*/)
-{
-	modm::atomic::Lock _;
-	*reinterpret_cast<volatile _a{{len}}*>(ptr) = value;
-}
-%% endmacro
-
-extern "C" void
-__atomic_store_c(size_t size, void *dest, const void *src, int /*memorder*/)
-{
-	modm::atomic::Lock _;
-	std::memcpy(dest, src, size);
-}
-
-
-%% macro atomic_exchange(len)
-extern "C" _a{{len}}
-__atomic_exchange_{{len//8}}(volatile void *ptr, _a{{len}} desired, int /*memorder*/)
-{
-	modm::atomic::Lock _;
-	const _a{{len}} previous = *reinterpret_cast<volatile _a{{len}}*>(ptr);
-	*reinterpret_cast<volatile _a{{len}}*>(ptr) = desired;
-	return previous;
-}
-%% endmacro
-
-extern "C" void
-__atomic_exchange_c(size_t size, void *ptr, void *val, void *ret, int /*memorder*/)
-{
-	modm::atomic::Lock _;
-	std::memcpy(ret, ptr, size);
-	std::memcpy(ptr, val, size);
-}
-
-
-%% macro atomic_compare_exchange(len)
-extern "C" bool
-__atomic_compare_exchange_{{len//8}}(volatile void *ptr, void *expected, _a{{len}} desired,
-                                     bool /*weak*/, int /*success_memorder*/, int /*failure_memorder*/)
-{
-	modm::atomic::Lock _;
-	const _a{{len}} current = *reinterpret_cast<volatile _a{{len}}*>(ptr);
-	if (current != *reinterpret_cast<_a{{len}}*>(expected))
-	{
-		*reinterpret_cast<_a{{len}}*>(expected) = current;
-		return false;
-	}
-	*reinterpret_cast<volatile _a{{len}}*>(ptr) = desired;
-	return true;
-}
-%% endmacro
-
-extern "C" bool
-__atomic_compare_exchange_c(size_t len, void *ptr, void *expected, void *desired,
-                            bool /*weak*/, int /*success_memorder*/, int /*failure_memorder*/)
-{
-	modm::atomic::Lock _;
-	if (std::memcmp(ptr, expected, len) == 0)
-	{
-		std::memcpy(ptr, desired, len);
-		return true;
-	}
-	std::memcpy(expected, ptr, len);
-	return false;
-}
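For contrast, a sketch of what the same operation costs on cores with and without exclusive monitors; only ARMv6-M ever reaches the functions from this (now deleted) file or its replacement:

```cpp
#include <atomic>

std::atomic<int> counter{};

int bump()
{
	// ARMv7-M and up: GCC inlines an LDREX/STREX retry loop, no library call.
	// ARMv6-M (Cortex-M0): no exclusive monitor exists, so this becomes a call
	// to __atomic_fetch_add_4(), implemented here via the global interrupt lock.
	return counter.fetch_add(1);
}
```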
-
-
-%% macro atomic_fetch(len)
-	%% for name, op in [("add", "+"), ("sub", "-"), ("and", "&"), ("or", "|"), ("xor", "^"), ("nand", "&")]
-	%% set prefix = "~" if name == "nand" else ""
-extern "C" _a{{len}}
-__atomic_fetch_{{name}}_{{len//8}}(volatile void *ptr, _a{{len}} value, int /*memorder*/)
-{
-	modm::atomic::Lock _;
-	const _a{{len}} previous = *reinterpret_cast<volatile _a{{len}}*>(ptr);
-	*reinterpret_cast<volatile _a{{len}}*>(ptr) = {{prefix}}(previous {{op}} value);
-	return previous;
-}
-extern "C" _a{{len}}
-__atomic_{{name}}_fetch_{{len//8}}(volatile void *ptr, _a{{len}} value, int /*memorder*/)
-{
-	modm::atomic::Lock _;
-	const _a{{len}} current = {{prefix}}(*reinterpret_cast<volatile _a{{len}}*>(ptr) {{op}} value);
-	*reinterpret_cast<volatile _a{{len}}*>(ptr) = current;
-	return current;
-}
-	%% endfor
-%% endmacro
-
-%% for length in [8, 16, 32, 64]
-// ============================ atomics for {{length}} bits ============================
-{{ atomic_load(length) }}
-
-{{ atomic_store(length) }}
-
-{{ atomic_exchange(length) }}
-
-{{ atomic_compare_exchange(length) }}
-
-{{ atomic_fetch(length) }}
-%% endfor
diff --git a/ext/gcc/cxxabi.cpp.in b/ext/gcc/cxxabi.cpp.in
index 42b979ac73..30e3f7df3f 100644
--- a/ext/gcc/cxxabi.cpp.in
+++ b/ext/gcc/cxxabi.cpp.in
@@ -2,7 +2,7 @@
  * Copyright (c) 2009-2011, Fabian Greif
  * Copyright (c) 2010, Martin Rosekeit
  * Copyright (c) 2012, Sascha Schade
- * Copyright (c) 2012-2014, 2020, Niklas Hauser
+ * Copyright (c) 2012-2014, 2020, 2024, Niklas Hauser
  *
  * This file is part of the modm project.
  *
@@ -27,44 +27,72 @@ void __cxa_deleted_virtual()
 %% if with_threadsafe_statics
 #include <atomic>
 
-/* One-time construction API, see ARM IHI0041D section 3.2.3.
- * The ARM C++ ABI mandates the guard to be 32-bit aligned, 32-bit values.
- */
+%% if is_avr
+%#
+// Even though the actual guard size is uint64_t on AVR, we only need to access
+// the first uint8_t and thus can significantly reduce the code size of the
+// atomic access implementation.
+using guard_type = uint8_t;
+%% elif is_cortex_m
+#include <modm/platform/device.hpp>
+
+// One-time construction API, see ARM IHI0041D section 3.2.3.
+// The ARM C++ ABI mandates the guard to be 32-bit aligned, 32-bit values.
+using guard_type = uint32_t;
+%% else
+%#
+using guard_type = uint64_t;
+%% endif
+%#
 enum
 {
 	UNINITIALIZED = 0,
 	INITIALIZED = 1,
-	INITIALIZING = 0x100,
+	INITIALIZING = 0x10,
 };
 
 // This function returns 1 only if the object needs to be initialized
-extern "C" int __cxa_guard_acquire(int *guard)
+extern "C" int
+__cxa_guard_acquire(guard_type *guard)
 {
 	auto atomic_guard = std::atomic_ref(*guard);
-	if (atomic_guard.load() == INITIALIZED)
-		return 0;
-	if (atomic_guard.exchange(INITIALIZING) == INITIALIZING)
+	guard_type value = atomic_guard.load(std::memory_order_relaxed);
+	do
 	{
-		modm_assert(0, "stat.rec",
-		            "Recursive initialization of a function static!", guard);
+		if (value == INITIALIZED) return 0;
+		if (value == INITIALIZING)
+		{
+%% if is_cortex_m
+			const bool is_in_irq = __get_IPSR();
+%% else
+			// The hardware cannot tell us, we must assume it to be true
+			constexpr bool is_in_irq = true;
+%% endif
+			// We got called from inside an interrupt, but we cannot yield back
+			modm_assert(not is_in_irq, "stat.rec",
+			            "Recursive initialization of a function static!", guard);
+		}
+		value = UNINITIALIZED;
 	}
+	while(not atomic_guard.compare_exchange_weak(value, INITIALIZING,
+	                                             std::memory_order_acquire, std::memory_order_relaxed));
 	return 1;
 }
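For context, this one-time construction API is what the compiler targets for every function-local static. A rough sketch of the generated code, written out as comments (the `Driver` type is hypothetical; the real symbols are mangled):

```cpp
struct Driver { Driver(); };

Driver& instance()
{
	// The compiler expands the declaration below roughly to:
	//
	//   static guard_type guard;                   // zero-initialized
	//   if ((guard & 1) == 0)                      // not yet INITIALIZED
	//       if (__cxa_guard_acquire(&guard) == 1)  // we won the race
	//       {
	//           // run the constructor; on exception: __cxa_guard_abort(&guard)
	//           __cxa_guard_release(&guard);       // publishes (guard & 1) == 1
	//       }
	static Driver d;
	return d;
}
```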
-extern "C" void __cxa_guard_release(int *guard) noexcept +extern "C" void +__cxa_guard_release(guard_type *guard) { auto atomic_guard = std::atomic_ref(*guard); - atomic_guard.store(INITIALIZED); + atomic_guard.store(INITIALIZED, std::memory_order_release); } // Called if the initialization terminates by throwing an exception. // After this function the compiler expects `(guard & 3) == 0`! -extern "C" void __cxa_guard_abort([[maybe_unused]] int *guard) noexcept +extern "C" void +__cxa_guard_abort([[maybe_unused]] guard_type *guard) { -%% if with_exceptions auto atomic_guard = std::atomic_ref(*guard); - atomic_guard.store(UNINITIALIZED); -%% endif + atomic_guard.store(UNINITIALIZED, std::memory_order_release); } %% endif diff --git a/ext/gcc/libstdc++ b/ext/gcc/libstdc++ index 9d10e00470..d37fa8184f 160000 --- a/ext/gcc/libstdc++ +++ b/ext/gcc/libstdc++ @@ -1 +1 @@ -Subproject commit 9d10e0047011f4c1e6ef4dbf76aca5e9a340a6f8 +Subproject commit d37fa8184f4f5fd8fbc645e396385896d6b435a1 diff --git a/ext/gcc/modm_atomic.hpp.in b/ext/gcc/modm_atomic.hpp.in new file mode 100644 index 0000000000..7c0bfdf9c2 --- /dev/null +++ b/ext/gcc/modm_atomic.hpp.in @@ -0,0 +1,198 @@ +/* + * Copyright (c) 2024, Niklas Hauser + * + * This file is part of the modm project. + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. + */ +// ---------------------------------------------------------------------------- + +#pragma once + +#include + +/* We are implementing the libary interface described here: + * See https://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary + * + * This header file must be included with ! + */ + +[[gnu::always_inline]] inline void +__modm_atomic_pre_barrier([[maybe_unused]] int memorder) +{ +%% if is_multicore + // On Cortex-M, this will emit a DMB instruction and a compiler fence + switch (memorder) { + case __ATOMIC_RELEASE: + case __ATOMIC_ACQ_REL: + case __ATOMIC_SEQ_CST: + __atomic_thread_fence(__ATOMIC_SEQ_CST); + } +%% else + // A compiler fence is enough + __atomic_signal_fence(__ATOMIC_SEQ_CST); +%% endif +} + +[[gnu::always_inline]] inline void +__modm_atomic_post_barrier([[maybe_unused]] int memorder) +{ +%% if is_multicore + // On Cortex-M, this will emit a DMB instruction and a compiler fence + switch (memorder) { + case __ATOMIC_CONSUME: + case __ATOMIC_ACQUIRE: + case __ATOMIC_ACQ_REL: + case __ATOMIC_SEQ_CST: + __atomic_thread_fence(__ATOMIC_SEQ_CST); + } +%% else + // A compiler fence is enough + __atomic_signal_fence(__ATOMIC_SEQ_CST); +%% endif + +} + +// ============================= generic integers ============================= +template +[[gnu::always_inline]] inline T +__modm_atomic_load_t(const volatile void *ptr, int memorder) +{ + T value{}; + __modm_atomic_pre_barrier(memorder); + { + modm::atomic::Lock _; + value = *reinterpret_cast(ptr); + } + __modm_atomic_post_barrier(memorder); + return value; +} + +%% macro atomic_load(len) +extern "C" [[gnu::always_inline]] inline {{len|u}} +__atomic_load_{{len//8}}(const volatile void *ptr, int memorder) +{ + return __modm_atomic_load_t<{{len|u}}>(ptr, memorder); +} +%% endmacro + +template +[[gnu::always_inline]] inline void +__modm_atomic_store_t(volatile void *ptr, T value, int memorder) +{ + __modm_atomic_pre_barrier(memorder); + { + modm::atomic::Lock _; + *reinterpret_cast(ptr) = value; + } + __modm_atomic_post_barrier(memorder); +} + +%% macro atomic_store(len) +extern "C" 
+
+// ============================= generic integers =============================
+template <typename T>
+[[gnu::always_inline]] inline T
+__modm_atomic_load_t(const volatile void *ptr, int memorder)
+{
+	T value{};
+	__modm_atomic_pre_barrier(memorder);
+	{
+		modm::atomic::Lock _;
+		value = *reinterpret_cast<const volatile T*>(ptr);
+	}
+	__modm_atomic_post_barrier(memorder);
+	return value;
+}
+
+%% macro atomic_load(len)
+extern "C" [[gnu::always_inline]] inline {{len|u}}
+__atomic_load_{{len//8}}(const volatile void *ptr, int memorder)
+{
+	return __modm_atomic_load_t<{{len|u}}>(ptr, memorder);
+}
+%% endmacro
+
+template <typename T>
+[[gnu::always_inline]] inline void
+__modm_atomic_store_t(volatile void *ptr, T value, int memorder)
+{
+	__modm_atomic_pre_barrier(memorder);
+	{
+		modm::atomic::Lock _;
+		*reinterpret_cast<volatile T*>(ptr) = value;
+	}
+	__modm_atomic_post_barrier(memorder);
+}
+
+%% macro atomic_store(len)
+extern "C" [[gnu::always_inline]] inline void
+__atomic_store_{{len//8}}(volatile void *ptr, {{len|u}} value, int memorder)
+{
+	__modm_atomic_store_t<{{len|u}}>(ptr, value, memorder);
+}
+%% endmacro
+
+template <typename T>
+[[gnu::always_inline]] inline T
+__modm_atomic_exchange_t(volatile void *ptr, T desired, int memorder)
+{
+	T previous{};
+	__modm_atomic_pre_barrier(memorder);
+	{
+		modm::atomic::Lock _;
+		previous = *reinterpret_cast<volatile T*>(ptr);
+		*reinterpret_cast<volatile T*>(ptr) = desired;
+	}
+	__modm_atomic_post_barrier(memorder);
+	return previous;
+}
+
+%% macro atomic_exchange(len)
+extern "C" [[gnu::always_inline]] inline {{len|u}}
+__atomic_exchange_{{len//8}}(volatile void *ptr, {{len|u}} desired, int memorder)
+{
+	return __modm_atomic_exchange_t<{{len|u}}>(ptr, desired, memorder);
+}
+%% endmacro
+
+template <typename T>
+[[gnu::always_inline]] inline bool
+__modm_atomic_compare_exchange_t(volatile void *ptr, void *expected, T desired, bool weak,
+                                 int success_memorder, int failure_memorder)
+{
+	bool retval{false};
+	__modm_atomic_pre_barrier(weak ? success_memorder : __ATOMIC_SEQ_CST);
+	{
+		modm::atomic::Lock _;
+		const T current = *reinterpret_cast<volatile T*>(ptr);
+		if (current == *reinterpret_cast<T*>(expected)) [[likely]]
+		{
+			*reinterpret_cast<volatile T*>(ptr) = desired;
+			retval = true;
+		}
+		else *reinterpret_cast<T*>(expected) = current;
+	}
+	__modm_atomic_post_barrier(weak ? (retval ? success_memorder : failure_memorder) : __ATOMIC_SEQ_CST);
+	return retval;
+}
+
+%% macro atomic_compare_exchange(len)
+extern "C" [[gnu::always_inline]] inline bool
+__atomic_compare_exchange_{{len//8}}(volatile void *ptr, void *expected, {{len|u}} desired,
+                                     bool weak, int success_memorder, int failure_memorder)
+{
+	return __modm_atomic_compare_exchange_t<{{len|u}}>(
+			ptr, expected, desired, weak, success_memorder, failure_memorder);
+}
+%% endmacro
+
+// ================================ lock free =================================
+extern "C" [[gnu::always_inline]] inline bool
+__atomic_is_lock_free (unsigned int object_size, const volatile void *ptr)
+{
+	// only lock free if size ≤ bus width and then also properly aligned
+	if (object_size <= {{bus_width//8}}) [[likely]]
+		return ((uintptr_t)ptr & (object_size - 1)) == 0;
+	return false;
+}
+
+
+%% macro atomic_fetch(len)
+	%% for name, op in [("and", "&"), ("or", "|"), ("xor", "^"), ("nand", "&")]
+	%% set prefix = "~" if name == "nand" else ""
+extern "C" [[gnu::always_inline]] inline {{len|u}}
+__atomic_fetch_{{name}}_{{len//8}}(volatile void *ptr, {{len|u}} value, int memorder)
+{
+	{{len|u}} previous{};
+	__modm_atomic_pre_barrier(memorder);
+	{
+		modm::atomic::Lock _;
+		previous = *reinterpret_cast<volatile {{len|u}}*>(ptr);
+		*reinterpret_cast<volatile {{len|u}}*>(ptr) = {{prefix}}(previous {{op}} value);
+	}
+	__modm_atomic_post_barrier(memorder);
+	return previous;
+}
+	%% endfor
+%% endmacro
+
+%% for length in bit_lengths
+// ========================= atomics for {{length}} bit integers =========================
+%% if length > bus_width
+{{ atomic_load(length) }}
+
+{{ atomic_store(length) }}
+%% endif
+
+{{ atomic_exchange(length) }}
+
+{{ atomic_compare_exchange(length) }}
+
+{{ atomic_fetch(length) }}
+%% endfor
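The size-and-alignment check above is what ultimately answers `std::atomic<T>::is_lock_free()` on these targets, matching the unit tests further down. For example, assuming the 32-bit `bus_width` of Cortex-M:

```cpp
#include <atomic>
#include <stdint.h>

std::atomic<uint32_t> counter{};   // 4 bytes, naturally aligned -> lock-free
std::atomic<uint64_t> timestamp{}; // 8 bytes > bus width        -> uses the IRQ lock

bool check()
{
	// Evaluates __atomic_is_lock_free(4, &counter) and (8, &timestamp)
	return counter.is_lock_free() and not timestamp.is_lock_free();
}
```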
diff --git a/ext/gcc/module_c++.lb b/ext/gcc/module_c++.lb
index 4543a53830..3bd270cfa4 100644
--- a/ext/gcc/module_c++.lb
+++ b/ext/gcc/module_c++.lb
@@ -23,12 +23,16 @@ Depending on the module options, the compiler options are appended with either:
 
 - `-fno-exceptions`: no C++ exceptions.
 - `-fno-rtti`: no C++ run-time type information.
+- `-fno-threadsafe-statics`: no protection of static variable initialization.
 
 or:
 
 - `-fexceptions`: with C++ exceptions.
 - `-frtti`: with C++ run-time type information.
 
+The `std::atomic` interface is implemented for AVR and Cortex-M devices.
+
+
 ## AVR
 
 A partial port of GCC libstdc++ is provided:
@@ -56,14 +60,15 @@ def prepare(module, options):
             BooleanOption(
                 name="rtti", default=False,
                 description=descr_rtti))
+
+    if is_avr or is_cortex_m:
         module.add_option(
             BooleanOption(
                 name="safe_statics", default=True,
                 description=descr_safe_statics))
+        module.depends(":architecture:atomic", ":architecture:assert")
 
-    if is_avr or is_cortex_m:
-        module.depends(":architecture:assert", ":stdc")
-
+    module.depends(":stdc")
     return True
 
@@ -71,6 +76,10 @@ def build(env):
     core = env[":target"].get_driver("core")["type"]
     is_avr = core.startswith("avr")
     is_cortex_m = core.startswith("cortex-m")
+
+    if "hosted" in core:
+        env.collect(":build:library", "atomic")
+
     if not (is_avr or is_cortex_m):
         return
 
@@ -84,16 +93,32 @@ def build(env):
         "with_memory_traits": env.has_module(":architecture:memory"),
         "with_heap": env.has_module(":platform:heap"),
         "is_avr": is_avr,
+        "is_cortex_m": is_cortex_m,
     }
     env.outbasepath = "modm/ext/gcc"
     env.template("cxxabi.cpp.in")
     env.template("new_delete.cpp.in")
 
+    env.substitutions = {
+        "is_multicore": env.has_module(":platform:multicore"),
+        "bus_width": 8 if core.startswith("avr") else 32,
+        "bit_lengths": [8, 16, 32, 64] if core.startswith("avr") or core.startswith("cortex-m0") else [64],
+    }
+    filters = {
+        "u": lambda bits: "unsigned int" if (bits == 32 and is_cortex_m) else f"uint{bits}_t"
+    }
+    env.template("modm_atomic.hpp.in", filters=filters)
+    env.template("atomic.cpp.in", filters=filters)
+
     if is_avr:
         env.collect(":build:path.include", "modm/ext/gcc/libstdc++/include")
         env.copy("libstdc++", ignore=env.ignore_files("*.lb", "*.md", "*.in", "examples"))
         env.template("assert.cpp.in", "assert.cpp")
 
+    env.outbasepath = "modm/ext/gcc"
+    env.copy("atomic")
+    env.collect(":build:path.include", "modm/ext/gcc")
+    env.collect(":build:cxxflags", "-fno-use-cxa-atexit")
 
     # Threadsafe statics
     if not with_threadsafe_statics:
@@ -133,7 +158,7 @@ Enables the full use of C++ runtime type information.
 descr_safe_statics = """# C++ Safe Statics Initialization
 
 Enables safe initialization of statics inside functions and interrupts.
-In case of recursive initialization the debug assertion `cxa.guard.recursion`
+In case of recursive initialization the debug assertion `stat.rec`
 is raised.
 
 Further reading on this topic:
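One note on the `u` filter: GCC declares the 4-byte sized builtins with `unsigned int`, while on `arm-none-eabi` the `uint32_t` typedef is `unsigned long`, so using `uint32_t` would produce conflicting declarations. Rendered for `bits=32` on Cortex-M, a signature from the templates above therefore comes out as:

```cpp
extern "C" unsigned int
__atomic_fetch_add_4(volatile void *ptr, unsigned int value, int /*memorder*/);
```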
diff --git a/ext/gcc/module_c.lb b/ext/gcc/module_c.lb
index 98f99f5065..bddf10bcdc 100644
--- a/ext/gcc/module_c.lb
+++ b/ext/gcc/module_c.lb
@@ -20,9 +20,8 @@ def init(module):
 
 Refines the C language to make it easier to use on embedded targets.
 
-## ARM Cortex-M
 
-For ARMv6-M, C11 atomics are implemented via atomic lock.
+## ARM Cortex-M
 
 Additional compiler options:
 
@@ -32,24 +31,18 @@ Additional compiler options:
 
 def prepare(module, options):
     core = options[":target"].get_driver("core")["type"]
+    if core.startswith("avr") or core.startswith("cortex-m"):
         module.depends(":architecture:assert")
-    if core.startswith("cortex-m0"):
-        module.depends(":architecture:atomic")
     return True
 
 
 def build(env):
     core = env[":target"].get_driver("core")["type"]
-    if not (core.startswith("avr") or core.startswith("cortex-m")):
-        return
-
-    env.outbasepath = "modm/ext/gcc"
     if core.startswith("cortex-m"):
+        env.outbasepath = "modm/ext/gcc"
        env.copy("cabi_cortex.c", "cabi.c")
-    if core.startswith("cortex-m0"):
-        env.template("atomics_c11_cortex.cpp.in")
 
     # Compiler options for targets
     env.collect(":build:linkflags", "--specs=nosys.specs")
diff --git a/src/modm/communication/amnb/message.hpp.in b/src/modm/communication/amnb/message.hpp.in
index 9842aa10e8..93bbf0e67d 100644
--- a/src/modm/communication/amnb/message.hpp.in
+++ b/src/modm/communication/amnb/message.hpp.in
@@ -198,7 +198,7 @@ protected:
 	{
 %% if with_heap
 		if (isLarge() and storage.large.data) {
-			if (*storage.large.data <= 1) delete storage.large.data;
+			if (*storage.large.data <= 1) delete[] storage.large.data;
 			else (*storage.large.data)--;
 			storage.large.data = nullptr;
 		}
diff --git a/src/modm/platform/core/rp/multicore.cpp b/src/modm/platform/core/rp/multicore.cpp
index 3b62535eb8..f4ed43e8d7 100644
--- a/src/modm/platform/core/rp/multicore.cpp
+++ b/src/modm/platform/core/rp/multicore.cpp
@@ -35,10 +35,14 @@ runCore1(void (*entry)(), uint32_t *stack_bottom, uint32_t stack_size)
 {
 	// assert(!(stack_size & 3u));
 	uint32_t *stack_ptr = stack_bottom + (stack_size / sizeof(uint32_t));
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Warray-bounds"
 	// push 1 value onto top of stack for core1_trampoline
 	stack_ptr -= 2;
 	stack_ptr[0] = (uintptr_t)entry;
 	stack_ptr[1] = (uintptr_t)core1_wrapper;
+#pragma GCC diagnostic pop
 
 	// Allow for the fact that the caller may have already enabled the FIFO IRQ for their
 	// own purposes (expecting FIFO content after core 1 is launched). We must disable
diff --git a/src/modm/platform/core/rp/multicore.hpp.in b/src/modm/platform/core/rp/multicore.hpp.in
index 1db2e69b0b..f42597f990 100644
--- a/src/modm/platform/core/rp/multicore.hpp.in
+++ b/src/modm/platform/core/rp/multicore.hpp.in
@@ -12,7 +12,8 @@
 
 #include
 #include
-#include <atomic>
+// We need the spinlock itself to implement <atomic>!
+// #include <atomic>
 
 namespace modm::platform::multicore
 {
@@ -94,12 +95,12 @@ public:
 	{
 		auto l = getInstance();
 		while (*l == 0);
-		std::atomic_thread_fence(std::memory_order_acquire);
+		__atomic_thread_fence(__ATOMIC_ACQUIRE);
 	}
 	static inline void unlock()
 	{
 		auto l = getInstance();
-		std::atomic_thread_fence(std::memory_order_release);
+		__atomic_thread_fence(__ATOMIC_RELEASE);
 		*l = 0;
 	}
 };
diff --git a/src/unittest/reporter.cpp b/src/unittest/reporter.cpp
index 2bc993018d..53c8be9a4a 100644
--- a/src/unittest/reporter.cpp
+++ b/src/unittest/reporter.cpp
@@ -17,7 +17,9 @@
 
 namespace
 {
-	FLASH_STORAGE_STRING(invaildName) = "invalid";
+	FLASH_STORAGE_STRING(invalidName) = "invalid";
+	FLASH_STORAGE_STRING(suiteHeader) = ">>> ";
+	FLASH_STORAGE_STRING(functionHeader) = " - ";
 
 	FLASH_STORAGE_STRING(failHeader) = "FAIL: ";
 	FLASH_STORAGE_STRING(failColon) = " : ";
@@ -31,8 +33,9 @@ namespace
 }
 
 unittest::Reporter::Reporter(modm::IODevice& device) :
-	outputStream(device), testName(modm::accessor::asFlash(invaildName)),
-	testsPassed(0), testsFailed(0)
+	outputStream(device), testName(modm::accessor::asFlash(invalidName)),
+	testFunction(modm::accessor::asFlash(invalidName)), testsPassed(0),
+	testsFailed(0)
 {
 }
 
@@ -40,7 +43,16 @@ void
 unittest::Reporter::nextTestSuite(modm::accessor::Flash<char> name)
 {
 	testName = name;
-	outputStream << ">>> " << testName << modm::endl;
+	outputStream << modm::accessor::asFlash(suiteHeader)
+	             << testName << modm::endl;
+}
+
+void
+unittest::Reporter::nextTestFunction(modm::accessor::Flash<char> name)
+{
+	testFunction = name;
+	outputStream << modm::accessor::asFlash(functionHeader)
+	             << testFunction << modm::endl;
 }
 
 void
@@ -56,8 +68,10 @@ unittest::Reporter::reportFailure(unsigned int lineNumber)
 	outputStream << modm::accessor::asFlash(failHeader)
 	             << testName
 	             << ':'
-	             << lineNumber
-	             << modm::accessor::asFlash(failColon);
+	             << lineNumber;
+	if (testFunction.getPointer() != invalidName)
+		outputStream << ':' << testFunction;
+	outputStream << modm::accessor::asFlash(failColon);
 	return outputStream;
 }
diff --git a/src/unittest/reporter.hpp b/src/unittest/reporter.hpp
index c6e40714a9..1eb109ce9f 100644
--- a/src/unittest/reporter.hpp
+++ b/src/unittest/reporter.hpp
@@ -49,6 +49,14 @@ namespace unittest
 		void
 		nextTestSuite(modm::accessor::Flash<char> name);
 
+		/**
+		 * \brief	Switch to the next test function
+		 *
+		 * \param	name	Name of the test function
+		 */
+		void
+		nextTestFunction(modm::accessor::Flash<char> name);
+
 		/**
 		 * \brief	Report a passed test
 		 *
@@ -80,6 +88,7 @@ namespace unittest
 	private:
 		modm::IOStream outputStream;
 		modm::accessor::Flash<char> testName;
+		modm::accessor::Flash<char> testFunction;
 
 		int_fast16_t testsPassed;
 		int_fast16_t testsFailed;
diff --git a/test/Makefile b/test/Makefile
index 5d41f9d46b..dda2ef79ff 100644
--- a/test/Makefile
+++ b/test/Makefile
@@ -13,12 +13,13 @@ LBUILD = $(shell which lbuild)
 # SCONS = python3 `which scons`
 SCONS = scons
 
+port ?= "auto"
 
 define compile-test
 	@$(RM) -r ../build/generated-unittest/$(1)
 	$(LBUILD) -p ../build/generated-unittest/$(1) -c config/$(1).xml $(3) \
 			  -C ../build/generated-unittest/$(1) build --no-log
-	$(SCONS) -C ../build/generated-unittest/$(1) $(2)
+	$(SCONS) -C ../build/generated-unittest/$(1) $(2) port=$(port)
 endef
 
 define run-test
 	$(call compile-test,$(1),$(2) program,$(3))
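Switching the spinlock from `std::atomic_thread_fence` to the raw `__atomic_thread_fence` builtin keeps `<atomic>` out of this header, since the new `<atomic>` implementation itself relies on this spinlock. The fences form the usual acquire/release pairing for a lock; a usage sketch (the class name is hypothetical, the diff only shows `lock()`/`unlock()`):

```cpp
void update_shared_state()
{
	HardwareSpinLock::lock();   // spins on the lock register, then __ATOMIC_ACQUIRE fence
	// ... critical section, protected across both RP2040 cores ...
	HardwareSpinLock::unlock(); // __ATOMIC_RELEASE fence, then writes 0 to release
}
```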
diff --git a/test/config/mega-2560-pro_C.xml b/test/config/mega-2560-pro_C.xml
index e32586c037..10da923a1a 100644
--- a/test/config/mega-2560-pro_C.xml
+++ b/test/config/mega-2560-pro_C.xml
@@ -23,6 +23,7 @@
     <module>modm-test:test:communication:xpcc</module>
    <module>modm-test:test:platform:**</module>
     <module>modm-test:test:processing</module>
+    <module>modm-test:test:ext</module>
     <module>modm-test:test:ui</module>
diff --git a/test/modm/ext/atomics_test.cpp b/test/modm/ext/atomics_test.cpp
new file mode 100644
index 0000000000..cc84f70f98
--- /dev/null
+++ b/test/modm/ext/atomics_test.cpp
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2024, Niklas Hauser
+ *
+ * This file is part of the modm project.
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ */
+// ----------------------------------------------------------------------------
+
+#include "atomics_test.hpp"
+#include <atomic>
+#include <stdint.h>
+
+// ============================= atomic integrals =============================
+template< typename T >
+static void
+test(std::atomic<T> &a)
+{
+	// we are only testing the API, not the actual atomic properties
+	// note: these memory orders make no sense, they are only defined for testing
+	TEST_ASSERT_EQUALS(a.load(), T(0));
+	a.store(1, std::memory_order_relaxed);
+	TEST_ASSERT_EQUALS(a.load(std::memory_order_relaxed), T(1));
+
+	TEST_ASSERT_EQUALS(a.exchange(T(2), std::memory_order_acquire), T(1));
+	TEST_ASSERT_EQUALS(a.exchange(T(3), std::memory_order_release), T(2));
+
+	T value{2};
+	TEST_ASSERT_FALSE(a.compare_exchange_weak(value, T(4)));
+
+	value = T(3);
+	TEST_ASSERT_TRUE(a.compare_exchange_weak(value, T(4),
+					std::memory_order_seq_cst, std::memory_order_consume));
+	value = T(4);
+	TEST_ASSERT_TRUE(a.compare_exchange_strong(value, T(5), std::memory_order_seq_cst));
+
+	TEST_ASSERT_EQUALS(a.fetch_add(T(2), std::memory_order_relaxed), T(5));
+	TEST_ASSERT_EQUALS(a.load(std::memory_order_relaxed), T(7));
+	TEST_ASSERT_EQUALS(++a, T(8));
+
+	TEST_ASSERT_EQUALS(a.fetch_sub(T(2), std::memory_order_relaxed), T(8));
+	TEST_ASSERT_EQUALS(a.load(std::memory_order_relaxed), T(6));
+	TEST_ASSERT_EQUALS(--a, T(5));
+
+	TEST_ASSERT_EQUALS(a.fetch_and(T(0b1110), std::memory_order_relaxed), T(5));
+	TEST_ASSERT_EQUALS(a.fetch_or(T(0b1000), std::memory_order_relaxed), T(4));
+	TEST_ASSERT_EQUALS(a.fetch_xor(T(0b1000), std::memory_order_relaxed), T(0b1100));
+	TEST_ASSERT_EQUALS(a.load(std::memory_order_relaxed), T(0b0100));
+}
+
+static std::atomic<uint8_t> a8{};
+void
+AtomicsTest::testAtomic8()
+{
+	TEST_ASSERT_TRUE(a8.is_lock_free());
+	test(a8);
+}
+
+static std::atomic<uint16_t> a16{};
+void
+AtomicsTest::testAtomic16()
+{
+#ifdef MODM_CPU_AVR
+	TEST_ASSERT_FALSE(a16.is_lock_free());
+#else
+	TEST_ASSERT_TRUE(a16.is_lock_free());
+#endif
+	test(a16);
+}
+
+static std::atomic<uint32_t> a32{};
+void
+AtomicsTest::testAtomic32()
+{
+#ifdef MODM_CPU_AVR
+	TEST_ASSERT_FALSE(a32.is_lock_free());
+#else
+	TEST_ASSERT_TRUE(a32.is_lock_free());
+#endif
+	test(a32);
+}
+
+static std::atomic<uint64_t> a64{};
+void
+AtomicsTest::testAtomic64()
+{
+#ifdef MODM_OS_HOSTED
+	TEST_ASSERT_TRUE(a64.is_lock_free());
+#else
+	TEST_ASSERT_FALSE(a64.is_lock_free());
+#endif
+	test(a64);
+}
+
+// ============================== atomic arrays ===============================
+struct Array3
+{
+	uint8_t v[3];
+};
+static std::atomic<Array3> array3{};
+void
+AtomicsTest::testAtomicArray3()
+{
+	using T = Array3;
+	constexpr size_t size = sizeof(T);
+
+	TEST_ASSERT_FALSE(array3.is_lock_free());
+
+	TEST_ASSERT_EQUALS_ARRAY(array3.load().v, T{}.v, size);
+	array3.store(T{1,2,3}, std::memory_order_relaxed);
+	TEST_ASSERT_EQUALS_ARRAY(array3.load(std::memory_order_relaxed).v, (T{1,2,3}).v, size);
+
+	TEST_ASSERT_EQUALS_ARRAY(array3.exchange(T{2,3,4}, std::memory_order_acquire).v,
+	                         (T{1,2,3}).v, size);
+	TEST_ASSERT_EQUALS_ARRAY(array3.exchange(T{3,4,5}, std::memory_order_release).v,
+	                         (T{2,3,4}).v, size);
+
+	T value{1,2,3};
+	TEST_ASSERT_FALSE(array3.compare_exchange_weak(value, T{4,5,6}));
+
+	value = T{3,4,5};
+	TEST_ASSERT_TRUE(array3.compare_exchange_weak(value, T{4,5,6},
+					std::memory_order_seq_cst, std::memory_order_consume));
+	value = T{4,5,6};
+	TEST_ASSERT_TRUE(array3.compare_exchange_strong(value, T{5,6,7},
+					std::memory_order_seq_cst));
+}
+
+struct Array
+{
+	uint8_t v[15];
+};
+static std::atomic<Array> array{};
+void
+AtomicsTest::testAtomicArray()
+{
+	using T = Array;
+	constexpr size_t size = sizeof(T);
+
+	TEST_ASSERT_FALSE(array.is_lock_free());
+
+	TEST_ASSERT_EQUALS_ARRAY(array.load().v, T{}.v, size);
+	array.store(T{1,2,3,4,5,6,7,8}, std::memory_order_relaxed);
+	TEST_ASSERT_EQUALS_ARRAY(array.load(std::memory_order_relaxed).v, (T{1,2,3,4,5,6,7,8}).v, size);
+
+	TEST_ASSERT_EQUALS_ARRAY(array.exchange(T{2,3,4,5,6,7,8,9}, std::memory_order_acquire).v,
+	                         (T{1,2,3,4,5,6,7,8}).v, size);
+	TEST_ASSERT_EQUALS_ARRAY(array.exchange(T{3,4,5,6,7,8,9,10}, std::memory_order_release).v,
+	                         (T{2,3,4,5,6,7,8,9}).v, size);
+
+	T value{1,2,3,4,5,6,7,8};
+	TEST_ASSERT_FALSE(array.compare_exchange_weak(value, T{4,5,6,7,8,9,10,11}));
+
+	value = T{3,4,5,6,7,8,9,10};
+	TEST_ASSERT_TRUE(array.compare_exchange_weak(value, T{4,5,6,7,8,9,10,11},
+					std::memory_order_seq_cst, std::memory_order_consume));
+	value = T{4,5,6,7,8,9,10,11};
+	TEST_ASSERT_TRUE(array.compare_exchange_strong(value, T{5,6,7,8,9,10,11,12},
+					std::memory_order_seq_cst));
+}
+
+// =============================== atomic flags ===============================
+static std::atomic_flag af{};
+void
+AtomicsTest::testAtomicFlag()
+{
+	TEST_ASSERT_FALSE(af.test_and_set());
+	TEST_ASSERT_TRUE(af.test());
+
+	TEST_ASSERT_TRUE(af.test_and_set());
+	af.clear();
+	TEST_ASSERT_FALSE(af.test());
+	TEST_ASSERT_FALSE(af.test_and_set());
+
+	TEST_ASSERT_TRUE(af.test());
+}
diff --git a/test/modm/ext/atomics_test.hpp b/test/modm/ext/atomics_test.hpp
new file mode 100644
index 0000000000..06fa269910
--- /dev/null
+++ b/test/modm/ext/atomics_test.hpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2023, Niklas Hauser
+ *
+ * This file is part of the modm project.
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ */
+// ----------------------------------------------------------------------------
+
+#include <unittest/testsuite.hpp>
+
+/// @ingroup modm_test_test_utils
+class AtomicsTest : public unittest::TestSuite
+{
+public:
+	void
+	testAtomic8();
+
+	void
+	testAtomic16();
+
+	void
+	testAtomic32();
+
+	void
+	testAtomic64();
+
+	void
+	testAtomicArray3();
+
+	void
+	testAtomicArray();
+
+	void
+	testAtomicFlag();
+};
diff --git a/test/modm/ext/cxa_guard_test.cpp b/test/modm/ext/cxa_guard_test.cpp
index b66072cf6e..aa89e13f69 100644
--- a/test/modm/ext/cxa_guard_test.cpp
+++ b/test/modm/ext/cxa_guard_test.cpp
@@ -10,10 +10,28 @@
 // ----------------------------------------------------------------------------
 
 #include "cxa_guard_test.hpp"
+#include <stdint.h>
+
+
+extern "C"
+{
+
+#ifdef MODM_CPU_ARM
+using guard_t = uint32_t;
+#else
+using guard_t = uint64_t;
+#endif
+
+int __cxa_guard_acquire(guard_t*);
+void __cxa_guard_release(guard_t*);
+void __cxa_guard_abort(guard_t*);
+
+}
+
 
 namespace
 {
-	int guard{0};
+	guard_t guard{0};
 	uint8_t constructor_calls{0};
 
 	struct StaticClass
@@ -37,25 +55,32 @@ namespace
 	}
 }
 
-
 void
 CxaGuardTest::testGuard()
 {
-	TEST_ASSERT_EQUALS(guard, 0);
+	TEST_ASSERT_EQUALS(guard, guard_t(0));
+
+	TEST_ASSERT_EQUALS(__cxa_guard_acquire(&guard), 1);
+#ifndef MODM_OS_HOSTED
+	TEST_ASSERT_EQUALS(guard, guard_t(0x10));
+#endif
+
+	__cxa_guard_abort(&guard);
+	TEST_ASSERT_EQUALS(guard, guard_t(0));
 
-	int retval = __cxa_guard_acquire(&guard);
-	TEST_ASSERT_EQUALS(retval, 1);
-	TEST_ASSERT_EQUALS(guard, 0x100);
+	TEST_ASSERT_EQUALS(__cxa_guard_acquire(&guard), 1);
+#ifndef MODM_OS_HOSTED
+	TEST_ASSERT_EQUALS(guard, guard_t(0x10));
+#endif
 
 	__cxa_guard_release(&guard);
-	TEST_ASSERT_EQUALS(guard, 1);
+	TEST_ASSERT_EQUALS(guard, guard_t(1));
 
-	retval = __cxa_guard_acquire(&guard);
-	TEST_ASSERT_EQUALS(retval, 0);
-	TEST_ASSERT_EQUALS(guard, 1);
+	TEST_ASSERT_EQUALS(__cxa_guard_acquire(&guard), 0);
+	TEST_ASSERT_EQUALS(guard, guard_t(1));
 
 	__cxa_guard_release(&guard);
-	TEST_ASSERT_EQUALS(guard, 1);
+	TEST_ASSERT_EQUALS(guard, guard_t(1));
 }
 
 void
diff --git a/test/modm/ext/module.lb b/test/modm/ext/module.lb
index b8390f3c39..570a98712d 100644
--- a/test/modm/ext/module.lb
+++ b/test/modm/ext/module.lb
@@ -17,8 +17,7 @@ def init(module):
 def prepare(module, options):
     module.depends("modm:stdc++")
-    core = options[":target"].get_driver("core")["type"]
-    return core.startswith("cortex-m")
+    return True
 
 def build(env):
diff --git a/tools/build_script_generator/scons/site_tools/unittestm.py b/tools/build_script_generator/scons/site_tools/unittestm.py
index 99eb508285..c44f8f0dd6 100644
--- a/tools/build_script_generator/scons/site_tools/unittestm.py
+++ b/tools/build_script_generator/scons/site_tools/unittestm.py
@@ -26,7 +26,8 @@
 # -----------------------------------------------------------------------------
 def unittest_action(target, source, env):
     unit_test.render_runner(headers=(str(s) for s in source),
-                            destination=target[0].abspath)
+                            destination=target[0].abspath,
+                            functions="hosted" in env.get("CONFIG_DEVICE_NAME"))
     return 0
 
 def unittest_emitter(target, source, env):
diff --git a/tools/modm_tools/unit_test.py b/tools/modm_tools/unit_test.py
index 9a06f0e2d5..9db3abf818 100644
--- a/tools/modm_tools/unit_test.py
+++ b/tools/modm_tools/unit_test.py
@@ -41,7 +41,7 @@ class TestClass : public unittest::TestSuite
 
 # -----------------------------------------------------------------------------
-TEMPLATE_UNITTEST = """\
+TEMPLATE_UNITTEST = r"""\
 #include <unittest/reporter.hpp>
 {% for test in tests %}
@@ -50,27 +50,35 @@ class TestClass : public unittest::TestSuite
 
 namespace
 {
-{% for test in tests %}
-FLASH_STORAGE_STRING({{test.instance}}Name) = "{{test.file}}";
-{% endfor %}
+{% for test in tests -%}
+FLASH_STORAGE_STRING({{test.instance}}Name) = "{{test.file[:-5]}}";
+{%- if functions %}
+{% for test_case in test.test_cases -%}
+FLASH_STORAGE_STRING({{test.instance}}_{{test_case}}Name) = "{{test_case[4:]}}";
+{% endfor -%}
+{% endif -%}
+{% endfor -%}
 }
 
 int
 run_modm_unit_test()
 {
 	using namespace modm::accessor;
-{% for test in tests %}
+{% for test in tests -%}
 	unittest::reporter.nextTestSuite(asFlash({{test.instance}}Name));
 	{
 		{{test.class}} {{test.instance}};
-	{% for test_case in test.test_cases %}
+	{%- for test_case in test.test_cases %}
+	{% if functions -%}
+		unittest::reporter.nextTestFunction(asFlash({{test.instance}}_{{test_case}}Name));
+	{% endif -%}
 		{{test.instance}}.setUp();
 		{{test.instance}}.{{test_case}}();
 		{{test.instance}}.tearDown();
-	{% endfor %}
+	{% endfor -%}
 	}
-{% endfor %}
+{%- endfor %}
 
 	return unittest::reporter.printSummary();
 }
@@ -106,9 +114,10 @@ def extract_tests(headers):
     return sorted(tests, key=lambda t: t["file"])
 
 
-def render_runner(headers, destination=None):
+def render_runner(headers, destination=None, functions=False):
     tests = extract_tests(headers)
-    content = Environment().from_string(TEMPLATE_UNITTEST).render({"tests": tests})
+    content = Environment().from_string(TEMPLATE_UNITTEST)
+    content = content.render({"tests": tests, "functions": functions})
 
     if destination is not None:
         Path(destination).write_text(content)
@@ -128,6 +137,12 @@ def render_runner(headers, destination=None):
         dest="destination",
         default="unittest_runner.cpp",
         help="Generated runner file.")
+    parser.add_argument(
+        "-fn", "--with-function-names",
+        dest="functions",
+        default=False,
+        action="store_true",
+        help="Generate with test function names.")
 
     args = parser.parse_args()
     headers = find_files.scan(args.path, ["_test"+h for h in find_files.HEADER])
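For illustration, with `functions=True` and a single suite containing one test case, the rendered runner comes out roughly as follows (names hypothetical, derived from a header called `atomics_test.hpp`):

```cpp
#include <unittest/reporter.hpp>
#include "atomics_test.hpp"

namespace
{
FLASH_STORAGE_STRING(atomicsTestName) = "atomics";
FLASH_STORAGE_STRING(atomicsTest_testAtomic8Name) = "Atomic8";
}

int
run_modm_unit_test()
{
	using namespace modm::accessor;
	unittest::reporter.nextTestSuite(asFlash(atomicsTestName));
	{
		AtomicsTest atomicsTest;
		unittest::reporter.nextTestFunction(asFlash(atomicsTest_testAtomic8Name));
		atomicsTest.setUp();
		atomicsTest.testAtomic8();
		atomicsTest.tearDown();
	}
	return unittest::reporter.printSummary();
}
```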