From 0485da997cf78041b8dba9b698fff8a505715a0d Mon Sep 17 00:00:00 2001 From: davidtrevelyan Date: Fri, 1 Dec 2023 22:20:04 +0000 Subject: [PATCH] Introduce RealtimeSanitizer (RADSan) real-time safety checking (#121) * Introduce configurable RADSan checks * Fix test fail output and comment RADSan config * Re-enable workflow restrictions * Add RADSan status badge to README * Add realtime attribute to Layer methods --------- Co-authored-by: jatinchowdhury18 --- .github/workflows/radsan.yml | 34 ++++++++++++++ CMakeLists.txt | 1 + README.md | 1 + RTNeural/CMakeLists.txt | 4 ++ RTNeural/Model.h | 7 +-- RTNeural/ModelT.h | 12 ++--- RTNeural/RTNeural.h | 15 +------ RTNeural/activation/activation.h | 41 ++++++++--------- RTNeural/activation/activation_eigen.h | 45 ++++++++++--------- RTNeural/activation/activation_xsimd.h | 45 ++++++++++--------- RTNeural/batchnorm/batchnorm.h | 33 +++++++------- RTNeural/batchnorm/batchnorm2d.h | 33 +++++++------- RTNeural/batchnorm/batchnorm2d_eigen.h | 33 +++++++------- RTNeural/batchnorm/batchnorm2d_xsimd.h | 33 +++++++------- RTNeural/batchnorm/batchnorm_eigen.h | 33 +++++++------- RTNeural/batchnorm/batchnorm_xsimd.h | 33 +++++++------- RTNeural/config.h | 37 +++++++++++++++ RTNeural/conv1d/conv1d.h | 25 ++++++----- RTNeural/conv1d/conv1d_eigen.h | 27 +++++------ RTNeural/conv1d/conv1d_xsimd.h | 31 ++++++------- RTNeural/conv1d_stateless/conv1d_stateless.h | 23 +++++----- .../conv1d_stateless/conv1d_stateless_eigen.h | 25 ++++++----- .../conv1d_stateless/conv1d_stateless_xsimd.h | 23 +++++----- RTNeural/conv2d/conv2d.h | 35 ++++++++------- RTNeural/conv2d/conv2d_eigen.h | 33 +++++++------- RTNeural/conv2d/conv2d_xsimd.h | 33 +++++++------- RTNeural/dense/dense.h | 33 +++++++------- RTNeural/dense/dense_eigen.h | 23 +++++----- RTNeural/dense/dense_xsimd.h | 43 +++++++++--------- RTNeural/gru/gru.h | 35 ++++++++------- RTNeural/gru/gru_eigen.h | 33 +++++++------- RTNeural/gru/gru_xsimd.h | 35 ++++++++------- RTNeural/lstm/lstm.h | 23 +++++----- RTNeural/lstm/lstm_eigen.h | 21 ++++----- RTNeural/lstm/lstm_xsimd.h | 23 +++++----- cmake/Sanitizers.cmake | 7 +++ cmake/Testing.cmake | 2 +- 37 files changed, 536 insertions(+), 437 deletions(-) create mode 100644 .github/workflows/radsan.yml create mode 100644 RTNeural/config.h create mode 100644 cmake/Sanitizers.cmake diff --git a/.github/workflows/radsan.yml b/.github/workflows/radsan.yml new file mode 100644 index 0000000..c007d5b --- /dev/null +++ b/.github/workflows/radsan.yml @@ -0,0 +1,34 @@ +name: RADSan Real-Time Safety + +on: + push: + branches: + - main + - develop + pull_request: + branches: + - main + - develop + +jobs: + build_and_test: + name: Check real-time safety with RADSan + runs-on: ubuntu-latest + container: realtimesanitizer/radsan-clang:latest + steps: + - name: Install CMake and Git + run: apt-get update && apt-get install -y cmake git + + - name: Checkout code + uses: actions/checkout@v2 + with: + submodules: true + + - name: Configure + run: cmake -Bbuild -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=ON -DRTNEURAL_ENABLE_RADSAN=ON + + - name: Build + run: cmake --build build --config Release --parallel + + - name: Test + run: cd build && ctest -C Release --output-on-failure diff --git a/CMakeLists.txt b/CMakeLists.txt index d1bf3a7..671abb4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,6 +1,7 @@ cmake_minimum_required(VERSION 3.5) project(RTNeural VERSION 1.0.0) include(cmake/CXXStandard.cmake) +include(cmake/Sanitizers.cmake) add_subdirectory(RTNeural) diff --git a/README.md 
b/README.md index f62240e..1b23870 100644 --- a/README.md +++ b/README.md @@ -9,6 +9,7 @@ [![Tests](https://github.com/jatinchowdhury18/RTNeural/workflows/Tests/badge.svg)](https://github.com/jatinchowdhury18/RTNeural/actions/workflows/tests.yml) [![Bench](https://github.com/jatinchowdhury18/RTNeural/workflows/Bench/badge.svg)](https://github.com/jatinchowdhury18/RTNeural/actions/workflows/bench.yml) [![Examples](https://github.com/jatinchowdhury18/RTNeural/actions/workflows/examples.yml/badge.svg)](https://github.com/jatinchowdhury18/RTNeural/actions/workflows/examples.yml) +[![RADSan](https://github.com/jatinchowdhury18/RTNeural/actions/workflows/radsan.yml/badge.svg)](https://github.com/jatinchowdhury18/RTNeural/actions/workflows/radsan.yml) [![codecov](https://codecov.io/gh/jatinchowdhury18/RTNeural/branch/main/graph/badge.svg?token=QBEBVSCQTW)](https://codecov.io/gh/jatinchowdhury18/RTNeural) [![arXiv](https://img.shields.io/badge/arXiv-2106.03037-b31b1b.svg)](https://arxiv.org/abs/2106.03037) [![License](https://img.shields.io/badge/License-BSD-blue.svg)](https://opensource.org/licenses/BSD-3-Clause) diff --git a/RTNeural/CMakeLists.txt b/RTNeural/CMakeLists.txt index 49a51e7..81c15df 100755 --- a/RTNeural/CMakeLists.txt +++ b/RTNeural/CMakeLists.txt @@ -51,3 +51,7 @@ target_compile_definitions(RTNeural PUBLIC RTNEURAL_NAMESPACE=${RTNEURAL_NAMESPACE} ) + +if(RTNEURAL_ENABLE_RADSAN) + rtneural_radsan_configure(RTNeural) +endif() diff --git a/RTNeural/Model.h b/RTNeural/Model.h index d23f1c0..d2a60a3 100644 --- a/RTNeural/Model.h +++ b/RTNeural/Model.h @@ -9,6 +9,7 @@ #include "batchnorm/batchnorm.tpp" #include "batchnorm/batchnorm2d.h" #include "batchnorm/batchnorm2d.tpp" +#include "config.h" #include "conv1d/conv1d.h" #include "conv1d/conv1d.tpp" #include "conv2d/conv2d.h" @@ -71,14 +72,14 @@ class Model } /** Resets the state of the network layers. */ - void reset() + RTNEURAL_REALTIME void reset() { for(auto* l : layers) l->reset(); } /** Performs forward propagation for this model. */ - inline T forward(const T* input) + RTNEURAL_REALTIME inline T forward(const T* input) { layers[0]->forward(input, outs[0].data()); @@ -91,7 +92,7 @@ class Model } /** Returns a pointer to the output of the final layer in the network. */ - inline const T* getOutputs() const noexcept + RTNEURAL_REALTIME inline const T* getOutputs() const noexcept { return outs.back().data(); } diff --git a/RTNeural/ModelT.h b/RTNeural/ModelT.h index e63eb32..f3278c1 100644 --- a/RTNeural/ModelT.h +++ b/RTNeural/ModelT.h @@ -351,20 +351,20 @@ class ModelT /** Get a reference to the layer at index `Index`. */ template - auto& get() noexcept + RTNEURAL_REALTIME auto& get() noexcept { return std::get(layers); } /** Get a reference to the layer at index `Index`. */ template - const auto& get() const noexcept + RTNEURAL_REALTIME const auto& get() const noexcept { return std::get(layers); } /** Resets the state of the network layers. */ - void reset() + RTNEURAL_REALTIME void reset() { modelt_detail::forEachInTuple([&](auto& layer, size_t) { layer.reset(); }, @@ -373,7 +373,7 @@ class ModelT /** Performs forward propagation for this model. */ template - inline typename std::enable_if<(N > 1), T>::type + RTNEURAL_REALTIME inline typename std::enable_if<(N > 1), T>::type forward(const T* input) { #if RTNEURAL_USE_XSIMD @@ -400,7 +400,7 @@ class ModelT /** Performs forward propagation for this model. 
*/ template - inline typename std::enable_if::type + RTNEURAL_REALTIME inline typename std::enable_if::type forward(const T* input) { #if RTNEURAL_USE_XSIMD @@ -426,7 +426,7 @@ class ModelT } /** Returns a pointer to the output of the final layer in the network. */ - inline const T* getOutputs() const noexcept + RTNEURAL_REALTIME inline const T* getOutputs() const noexcept { return outs; } diff --git a/RTNeural/RTNeural.h b/RTNeural/RTNeural.h index af7e450..18a59d3 100644 --- a/RTNeural/RTNeural.h +++ b/RTNeural/RTNeural.h @@ -5,21 +5,8 @@ // C++ STL includes #include -#ifndef RTNEURAL_NAMESPACE -#define RTNEURAL_NAMESPACE RTNeural -#endif - -// Handle default RTNeural defines -#ifndef RTNEURAL_DEFAULT_ALIGNMENT -#if _MSC_VER -#pragma message("RTNEURAL_DEFAULT_ALIGNMENT was not defined! Using default alignment = 16.") -#else -#warning "RTNEURAL_DEFAULT_ALIGNMENT was not defined! Using default alignment = 16." -#endif -#define RTNEURAL_DEFAULT_ALIGNMENT 16 -#endif - // RTNeural includes: +#include "config.h" #include "Model.h" #include "ModelT.h" #include "model_loader.h" diff --git a/RTNeural/activation/activation.h b/RTNeural/activation/activation.h index 3cc19c2..6f80bbb 100644 --- a/RTNeural/activation/activation.h +++ b/RTNeural/activation/activation.h @@ -2,6 +2,7 @@ #define ACTIVATION_H_INCLUDED #include "../Layer.h" +#include "../config.h" #include namespace RTNEURAL_NAMESPACE @@ -24,7 +25,7 @@ class Activation : public Layer std::string getName() const noexcept override { return name; } /** Implements the forward propagation step for this layer. */ - inline void forward(const T* input, T* out) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* out) noexcept override { for(int i = 0; i < Layer::out_size; ++i) out[i] = func(input[i]); @@ -71,7 +72,7 @@ class TanhActivation final : public Activation } /** Performs forward propagation for tanh activation. */ - inline void forward(const T* input, T* out) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* out) noexcept override { for(int i = 0; i < Layer::out_size; ++i) out[i] = MathsProvider::tanh(input[i]); @@ -94,10 +95,10 @@ class TanhActivationT /** Returns true since this layer is an activation layer. */ constexpr bool isActivation() const noexcept { return true; } - void reset() { } + RTNEURAL_REALTIME void reset() { } /** Performs forward propagation for tanh activation. */ - inline void forward(const T (&ins)[size]) noexcept + RTNEURAL_REALTIME inline void forward(const T (&ins)[size]) noexcept { for(int i = 0; i < size; ++i) outs[i] = MathsProvider::tanh(ins[i]); @@ -142,10 +143,10 @@ class ReLuActivationT /** Returns true since this layer is an activation layer. */ constexpr bool isActivation() const noexcept { return true; } - void reset() { } + RTNEURAL_REALTIME void reset() { } /** Performs forward propagation for ReLU activation. */ - inline void forward(const T (&ins)[size]) noexcept + RTNEURAL_REALTIME inline void forward(const T (&ins)[size]) noexcept { for(int i = 0; i < size; ++i) outs[i] = std::max((T)0, ins[i]); @@ -190,10 +191,10 @@ class SigmoidActivationT /** Returns true since this layer is an activation layer. */ constexpr bool isActivation() const noexcept { return true; } - void reset() { } + RTNEURAL_REALTIME void reset() { } /** Performs forward propagation for sigmoid activation. 
*/ - inline void forward(const T (&ins)[size]) noexcept + RTNEURAL_REALTIME inline void forward(const T (&ins)[size]) noexcept { for(int i = 0; i < size; ++i) outs[i] = MathsProvider::sigmoid(ins[i]); @@ -222,7 +223,7 @@ class SoftmaxActivation final : public Activation } /** Performs forward propagation for softmax activation. */ - inline void forward(const T* input, T* out) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* out) noexcept override { T exp_sum = 0; for(int i = 0; i < Layer::out_size; ++i) @@ -255,10 +256,10 @@ class SoftmaxActivationT /** Returns true since this layer is an activation layer. */ constexpr bool isActivation() const noexcept { return true; } - void reset() { } + RTNEURAL_REALTIME void reset() { } /** Performs forward propagation for softmax activation. */ - inline void forward(const T (&ins)[size]) noexcept + RTNEURAL_REALTIME inline void forward(const T (&ins)[size]) noexcept { T exp_sum = 0; for(int i = 0; i < size; ++i) @@ -297,7 +298,7 @@ class ELuActivation final : public Activation } /** Sets a custom value for the layer's "alpha" parameter. */ - void set_alpha(T newAlpha) { alpha = newAlpha; } + RTNEURAL_REALTIME void set_alpha(T newAlpha) { alpha = newAlpha; } private: T alpha = (T)1; @@ -319,11 +320,11 @@ class ELuActivationT /** Returns true since this layer is an activation layer. */ constexpr bool isActivation() const noexcept { return true; } - void reset() { } + RTNEURAL_REALTIME void reset() { } /** Performs forward propagation for elu activation. */ template - inline typename std::enable_if::type + RTNEURAL_REALTIME inline typename std::enable_if::type forward(const T (&ins)[size]) noexcept { for(int i = 0; i < size; ++i) @@ -332,7 +333,7 @@ class ELuActivationT /** Performs forward propagation for elu activation (with custom alpha parameter). */ template - inline typename std::enable_if::type + RTNEURAL_REALTIME inline typename std::enable_if::type forward(const T (&ins)[size]) noexcept { static constexpr T alpha = (T)AlphaNumerator / (T)AlphaDenominator; @@ -355,13 +356,13 @@ class PReLUActivation final : public Activation } /** Performs forward propagation for prelu activation. */ - inline void forward(const T* input, T* out) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* out) noexcept override { for(auto i = 0; i < Layer::in_size; ++i) out[i] = input[i] >= (T)0 ? input[i] : (input[i] * alpha[i]); } - void setAlphaVals(const std::vector& alphaVals) + RTNEURAL_REALTIME void setAlphaVals(const std::vector& alphaVals) { if(alphaVals.size() == 1) { @@ -399,16 +400,16 @@ class PReLUActivationT /** Returns false since this layer has weights even though it is an activation layer. */ constexpr bool isActivation() const noexcept { return false; } - void reset() { } + RTNEURAL_REALTIME void reset() { } /** Performs forward propagation for prelu activation. */ - inline void forward(const T (&ins)[size]) noexcept + RTNEURAL_REALTIME inline void forward(const T (&ins)[size]) noexcept { for(auto i = 0; i < size; ++i) outs[i] = ins[i] >= (T)0 ? 
ins[i] : (ins[i] * alpha[i]); } - void setAlphaVals(const std::vector& alphaVals) + RTNEURAL_REALTIME void setAlphaVals(const std::vector& alphaVals) { if(alphaVals.size() == 1) { diff --git a/RTNeural/activation/activation_eigen.h b/RTNeural/activation/activation_eigen.h index 18699b6..cdfecb0 100644 --- a/RTNeural/activation/activation_eigen.h +++ b/RTNeural/activation/activation_eigen.h @@ -2,6 +2,7 @@ #define ACTIVATIONEIGEN_H_INCLUDED #include "../common.h" +#include "../config.h" #include "../maths/maths_eigen.h" namespace RTNEURAL_NAMESPACE @@ -26,7 +27,7 @@ class TanhActivation : public Activation } /** Performs forward propagation for tanh activation. */ - inline void forward(const T* input, T* out) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* out) noexcept override { inVec = Eigen::Map, RTNeuralEigenAlignment>( input, Layer::in_size, 1); @@ -61,10 +62,10 @@ class TanhActivationT /** Returns true if this layer is an activation layer. */ constexpr bool isActivation() const noexcept { return true; } - void reset() { } + RTNEURAL_REALTIME void reset() { } /** Performs forward propagation for tanh activation. */ - inline void forward(const v_type& ins) noexcept + RTNEURAL_REALTIME inline void forward(const v_type& ins) noexcept { outs = MathsProvider::tanh(ins); } @@ -94,7 +95,7 @@ class ReLuActivation : public Activation } /** Performs forward propagation for ReLU activation. */ - inline void forward(const T* input, T* out) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* out) noexcept override { inVec = Eigen::Map, RTNeuralEigenAlignment>( input, Layer::in_size, 1); @@ -129,10 +130,10 @@ class ReLuActivationT /** Returns true since this layer is an activation layer. */ constexpr bool isActivation() const noexcept { return true; } - void reset() { } + RTNEURAL_REALTIME void reset() { } /** Performs forward propagation for ReLU activation. */ - inline void forward(const v_type& ins) noexcept + RTNEURAL_REALTIME inline void forward(const v_type& ins) noexcept { outs = ins.array().max((T)0); } @@ -163,7 +164,7 @@ class SigmoidActivation : public Activation } /** Performs forward propagation for sigmoid activation. */ - inline void forward(const T* input, T* out) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* out) noexcept override { inVec = Eigen::Map, RTNeuralEigenAlignment>( input, Layer::in_size, 1); @@ -198,10 +199,10 @@ class SigmoidActivationT /** Returns true since this layer is an activation layer. */ constexpr bool isActivation() const noexcept { return true; } - void reset() { } + RTNEURAL_REALTIME void reset() { } /** Performs forward propagation for sigmoid activation. */ - inline void forward(const v_type& ins) noexcept + RTNEURAL_REALTIME inline void forward(const v_type& ins) noexcept { outs = MathsProvider::sigmoid(ins); } @@ -231,7 +232,7 @@ class SoftmaxActivation : public Activation } /** Performs forward propagation for softmax activation. */ - inline void forward(const T* input, T* out) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* out) noexcept override { inVec = Eigen::Map, RTNeuralEigenAlignment>( input, Layer::in_size, 1); @@ -267,10 +268,10 @@ class SoftmaxActivationT /** Returns true since this layer is an activation layer. */ constexpr bool isActivation() const noexcept { return true; } - void reset() { } + RTNEURAL_REALTIME void reset() { } /** Performs forward propagation for softmax activation. 
*/ - inline void forward(const v_type& ins) noexcept + RTNEURAL_REALTIME inline void forward(const v_type& ins) noexcept { outs = MathsProvider::exp(ins); outs = outs / outs.sum(); @@ -302,7 +303,7 @@ class ELuActivation : public Activation } /** Performs forward propagation for softmax activation. */ - inline void forward(const T* input, T* out) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* out) noexcept override { inVec = Eigen::Map, RTNeuralEigenAlignment>( input, Layer::in_size, 1); @@ -315,7 +316,7 @@ class ELuActivation : public Activation Eigen::Matrix outVec; /** Sets a custom value for the layer's "alpha" parameter. */ - void set_alpha(T newAlpha) { alpha = newAlpha; } + RTNEURAL_REALTIME void set_alpha(T newAlpha) { alpha = newAlpha; } private: const Eigen::Matrix ones; @@ -344,11 +345,11 @@ class ELuActivationT /** Returns true since this layer is an activation layer. */ constexpr bool isActivation() const noexcept { return true; } - void reset() { } + RTNEURAL_REALTIME void reset() { } /** Performs forward propagation for elu activation. */ template - inline typename std::enable_if::type + RTNEURAL_REALTIME inline typename std::enable_if::type forward(const v_type& ins) noexcept { outs = (ins.array() > (T)0).select(ins, MathsProvider::exp(ins) - ones.array()); @@ -356,7 +357,7 @@ class ELuActivationT /** Performs forward propagation for elu activation (with custom alpha parameter). */ template - inline typename std::enable_if::type + RTNEURAL_REALTIME inline typename std::enable_if::type forward(const v_type& ins) noexcept { static constexpr T alpha = (T)AlphaNumerator / (T)AlphaDenominator; @@ -384,7 +385,7 @@ class PReLUActivation final : public Activation } /** Performs forward propagation for prelu activation. */ - inline void forward(const T* input, T* out) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* out) noexcept override { inVec = Eigen::Map, RTNeuralEigenAlignment>( input, Layer::in_size, 1); @@ -393,7 +394,7 @@ class PReLUActivation final : public Activation std::copy(outVec.data(), outVec.data() + Layer::in_size, out); } - void setAlphaVals(const std::vector& alphaVals) + RTNEURAL_REALTIME void setAlphaVals(const std::vector& alphaVals) { if(alphaVals.size() == 1) { @@ -435,15 +436,15 @@ class PReLUActivationT /** Returns false since this layer has weights even though it is an activation layer. */ constexpr bool isActivation() const noexcept { return false; } - void reset() { } + RTNEURAL_REALTIME void reset() { } /** Performs forward propagation for prelu activation. */ - inline void forward(const v_type& ins) noexcept + RTNEURAL_REALTIME inline void forward(const v_type& ins) noexcept { outs = (ins.array() >= (T)0).select(ins, alpha.cwiseProduct(ins)); } - void setAlphaVals(const std::vector& alphaVals) + RTNEURAL_REALTIME void setAlphaVals(const std::vector& alphaVals) { if(alphaVals.size() == 1) { diff --git a/RTNeural/activation/activation_xsimd.h b/RTNeural/activation/activation_xsimd.h index 2b201e9..c903f9b 100644 --- a/RTNeural/activation/activation_xsimd.h +++ b/RTNeural/activation/activation_xsimd.h @@ -2,6 +2,7 @@ #define ACTIVATIONXSIMD_H_INCLUDED #include "../common.h" +#include "../config.h" #include "../maths/maths_xsimd.h" namespace RTNEURAL_NAMESPACE @@ -24,7 +25,7 @@ class TanhActivation : public Activation } /** Performs forward propagation for tanh activation. 
*/ - inline void forward(const T* input, T* out) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* out) noexcept override { tanh(input, out, Layer::in_size); } @@ -54,10 +55,10 @@ class TanhActivationT /** Returns true since this layer is an activation layer. */ constexpr bool isActivation() const noexcept { return true; } - void reset() { } + RTNEURAL_REALTIME void reset() { } /** Performs forward propagation for tanh activation. */ - inline void forward(const v_type (&ins)[v_io_size]) noexcept + RTNEURAL_REALTIME inline void forward(const v_type (&ins)[v_io_size]) noexcept { for(int i = 0; i < v_io_size; ++i) outs[i] = MathsProvider::tanh(ins[i]); @@ -84,7 +85,7 @@ class ReLuActivation : public Activation } /** Performs forward propagation for ReLU activation. */ - inline void forward(const T* input, T* out) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* out) noexcept override { xsimd::transform( input, &input[Layer::in_size], zeros.begin(), out, @@ -119,10 +120,10 @@ class ReLuActivationT /** Returns true since this layer is an activation layer. */ constexpr bool isActivation() const noexcept { return true; } - void reset() { } + RTNEURAL_REALTIME void reset() { } /** Performs forward propagation for ReLU activation. */ - inline void forward(const v_type (&ins)[v_io_size]) noexcept + RTNEURAL_REALTIME inline void forward(const v_type (&ins)[v_io_size]) noexcept { for(int i = 0; i < v_io_size; ++i) outs[i] = xsimd::max(ins[i], v_type((T)0)); @@ -148,7 +149,7 @@ class SigmoidActivation : public Activation } /** Performs forward propagation for sigmoid activation. */ - inline void forward(const T* input, T* out) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* out) noexcept override { sigmoid(input, out, Layer::in_size); } @@ -178,10 +179,10 @@ class SigmoidActivationT /** Returns true since this layer is an activation layer. */ constexpr bool isActivation() const noexcept { return true; } - void reset() { } + RTNEURAL_REALTIME void reset() { } /** Performs forward propagation for sigmoid activation. */ - inline void forward(const v_type (&ins)[v_io_size]) noexcept + RTNEURAL_REALTIME inline void forward(const v_type (&ins)[v_io_size]) noexcept { for(int i = 0; i < v_io_size; ++i) outs[i] = MathsProvider::sigmoid(ins[i]); @@ -207,7 +208,7 @@ class SoftmaxActivation : public Activation } /** Performs forward propagation for softmax activation. */ - inline void forward(const T* input, T* out) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* out) noexcept override { softmax(input, out, Layer::in_size); } @@ -237,10 +238,10 @@ class SoftmaxActivationT /** Returns true since this layer is an activation layer. */ constexpr bool isActivation() const noexcept { return true; } - void reset() { } + RTNEURAL_REALTIME void reset() { } /** Performs forward propagation for softmax activation. */ - inline void forward(const v_type (&ins)[v_io_size]) noexcept + RTNEURAL_REALTIME inline void forward(const v_type (&ins)[v_io_size]) noexcept { v_type exp_sum {}; for(int i = 0; i < v_io_size; ++i) @@ -274,13 +275,13 @@ class ELuActivation final : public Activation } /** Performs forward propagation for softmax activation. */ - inline void forward(const T* input, T* out) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* out) noexcept override { elu(input, out, Layer::in_size, alpha); } /** Sets a custom value for the layer's "alpha" parameter. 
*/ - void set_alpha(T newAlpha) { alpha = newAlpha; } + RTNEURAL_REALTIME void set_alpha(T newAlpha) { alpha = newAlpha; } private: T alpha = (T)1; @@ -306,11 +307,11 @@ class ELuActivationT /** Returns true since this layer is an activation layer. */ constexpr bool isActivation() const noexcept { return true; } - void reset() { } + RTNEURAL_REALTIME void reset() { } /** Performs forward propagation for elu activation. */ template - inline typename std::enable_if::type + RTNEURAL_REALTIME inline typename std::enable_if::type forward(const v_type (&ins)[v_io_size]) noexcept { for(int i = 0; i < v_io_size; ++i) @@ -319,7 +320,7 @@ class ELuActivationT /** Performs forward propagation for elu activation (with custom alpha parameter). */ template - inline typename std::enable_if::type + RTNEURAL_REALTIME inline typename std::enable_if::type forward(const v_type (&ins)[v_io_size]) noexcept { static constexpr T alpha = (T)AlphaNumerator / (T)AlphaDenominator; @@ -342,7 +343,7 @@ class PReLUActivation final : public Activation } /** Performs forward propagation for prelu activation. */ - inline void forward(const T* input, T* out) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* out) noexcept override { using b_type = xsimd::simd_type; constexpr auto inc = (int)b_type::size; @@ -362,7 +363,7 @@ class PReLUActivation final : public Activation out[i] = input[i] >= (T)0 ? input[i] : (input[i] * alpha[i]); } - void setAlphaVals(const std::vector& alphaVals) + RTNEURAL_REALTIME void setAlphaVals(const std::vector& alphaVals) { if(alphaVals.size() == 1) { @@ -404,16 +405,16 @@ class PReLUActivationT /** Returns false since this layer has weights even though it is an activation layer. */ constexpr bool isActivation() const noexcept { return false; } - void reset() { } + RTNEURAL_REALTIME void reset() { } /** Performs forward propagation for prelu activation. */ - inline void forward(const v_type (&ins)[v_io_size]) noexcept + RTNEURAL_REALTIME inline void forward(const v_type (&ins)[v_io_size]) noexcept { for(int i = 0; i < v_io_size; ++i) outs[i] = xsimd::select(ins[i] >= (T)0, ins[i], ins[i] * alpha[i]); } - void setAlphaVals(const std::vector& alphaVals) + RTNEURAL_REALTIME void setAlphaVals(const std::vector& alphaVals) { if(alphaVals.size() == 1) { diff --git a/RTNeural/batchnorm/batchnorm.h b/RTNeural/batchnorm/batchnorm.h index 2c66821..3cb1a94 100644 --- a/RTNeural/batchnorm/batchnorm.h +++ b/RTNeural/batchnorm/batchnorm.h @@ -10,6 +10,7 @@ #else #include "../Layer.h" #include "../common.h" +#include "../config.h" #include namespace RTNEURAL_NAMESPACE @@ -25,26 +26,26 @@ class BatchNorm1DLayer final : public Layer std::string getName() const noexcept override { return "batchnorm"; } /** Performs forward propagation for this layer. */ - inline void forward(const T* input, T* out) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* out) noexcept override { for(int i = 0; i < Layer::out_size; ++i) out[i] = multiplier[i] * (input[i] - running_mean[i]) + beta[i]; } /** Sets the layer "gamma" values. */ - void setGamma(const std::vector& gammaVals); + RTNEURAL_REALTIME void setGamma(const std::vector& gammaVals); /** Sets the layer "beta" values. */ - void setBeta(const std::vector& betaVals); + RTNEURAL_REALTIME void setBeta(const std::vector& betaVals); /** Sets the layer's trained running mean. 
*/ - void setRunningMean(const std::vector& runningMean); + RTNEURAL_REALTIME void setRunningMean(const std::vector& runningMean); /** Set's the layer's trained running variance. */ - void setRunningVariance(const std::vector& runningVar); + RTNEURAL_REALTIME void setRunningVariance(const std::vector& runningVar); /** Set's the layer "epsilon" value. */ - void setEpsilon(T epsilon); + RTNEURAL_REALTIME void setEpsilon(T epsilon); private: void updateMultiplier(); @@ -78,11 +79,11 @@ class BatchNorm1DT constexpr bool isActivation() const noexcept { return false; } /** Resets the layer state. */ - void reset() { } + RTNEURAL_REALTIME void reset() { } /** Performs forward propagation for this layer. */ template - inline typename std::enable_if::type + RTNEURAL_REALTIME inline typename std::enable_if::type forward(const T (&ins)[in_size]) noexcept { for(int i = 0; i < size; ++i) @@ -91,7 +92,7 @@ class BatchNorm1DT /** Performs forward propagation for this layer. */ template - inline typename std::enable_if::type + RTNEURAL_REALTIME inline typename std::enable_if::type forward(const T (&ins)[in_size]) noexcept { for(int i = 0; i < size; ++i) @@ -100,28 +101,28 @@ class BatchNorm1DT /** Sets the layer "gamma" values. */ template - typename std::enable_if::type setGamma(const std::vector& gammaVals); + RTNEURAL_REALTIME typename std::enable_if::type setGamma(const std::vector& gammaVals); /** Sets the layer "gamma" values. */ template - typename std::enable_if::type setGamma(const std::vector&) { } + RTNEURAL_REALTIME typename std::enable_if::type setGamma(const std::vector&) { } /** Sets the layer "beta" values. */ template - typename std::enable_if::type setBeta(const std::vector& betaVals); + RTNEURAL_REALTIME typename std::enable_if::type setBeta(const std::vector& betaVals); /** Sets the layer "beta" values. */ template - typename std::enable_if::type setBeta(const std::vector&) { } + RTNEURAL_REALTIME typename std::enable_if::type setBeta(const std::vector&) { } /** Sets the layer's trained running mean. */ - void setRunningMean(const std::vector& runningMean); + RTNEURAL_REALTIME void setRunningMean(const std::vector& runningMean); /** Set's the layer's trained running variance. */ - void setRunningVariance(const std::vector& runningVar); + RTNEURAL_REALTIME void setRunningVariance(const std::vector& runningVar); /** Set's the layer "epsilon" value. */ - void setEpsilon(T epsilon); + RTNEURAL_REALTIME void setEpsilon(T epsilon); T outs alignas(RTNEURAL_DEFAULT_ALIGNMENT)[out_size]; diff --git a/RTNeural/batchnorm/batchnorm2d.h b/RTNeural/batchnorm/batchnorm2d.h index a11dbca..c9e8db7 100644 --- a/RTNeural/batchnorm/batchnorm2d.h +++ b/RTNeural/batchnorm/batchnorm2d.h @@ -9,6 +9,7 @@ #include "batchnorm2d_xsimd.tpp" #else #include "../Layer.h" +#include "../config.h" namespace RTNEURAL_NAMESPACE { @@ -23,7 +24,7 @@ class BatchNorm2DLayer final : public Layer std::string getName() const noexcept override { return "batchnorm2d"; } /** Performs forward propagation for this layer. */ - inline void forward(const T* input, T* out) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* out) noexcept override { for(int i = 0; i < num_features; i++) { @@ -35,19 +36,19 @@ class BatchNorm2DLayer final : public Layer } /** Sets the layer "gamma" values. */ - void setGamma(const std::vector& gammaVals); + RTNEURAL_REALTIME void setGamma(const std::vector& gammaVals); /** Sets the layer "beta" values. 
*/ - void setBeta(const std::vector& betaVals); + RTNEURAL_REALTIME void setBeta(const std::vector& betaVals); /** Sets the layer's trained running mean. */ - void setRunningMean(const std::vector& runningMean); + RTNEURAL_REALTIME void setRunningMean(const std::vector& runningMean); /** Set's the layer's trained running variance. */ - void setRunningVariance(const std::vector& runningVar); + RTNEURAL_REALTIME void setRunningVariance(const std::vector& runningVar); /** Set's the layer "epsilon" value. */ - void setEpsilon(T epsilon); + RTNEURAL_REALTIME void setEpsilon(T epsilon); private: void updateMultiplier(); @@ -86,11 +87,11 @@ class BatchNorm2DT constexpr bool isActivation() const noexcept { return false; } /** Resets the layer state. */ - void reset() { } + RTNEURAL_REALTIME void reset() { } /** Performs forward propagation for this layer. */ template - inline typename std::enable_if::type + RTNEURAL_REALTIME inline typename std::enable_if::type forward(const T (&ins)[in_size]) noexcept { for(int i = 0; i < num_features; i++) @@ -104,7 +105,7 @@ class BatchNorm2DT /** Performs forward propagation for this layer. */ template - inline typename std::enable_if::type + RTNEURAL_REALTIME inline typename std::enable_if::type forward(const T (&ins)[in_size]) noexcept { for(int i = 0; i < num_features; i++) @@ -118,28 +119,28 @@ class BatchNorm2DT /** Sets the layer "gamma" values. */ template - typename std::enable_if::type setGamma(const std::vector& gammaVals); + RTNEURAL_REALTIME typename std::enable_if::type setGamma(const std::vector& gammaVals); /** Sets the layer "gamma" values. */ template - typename std::enable_if::type setGamma(const std::vector&) { } + RTNEURAL_REALTIME typename std::enable_if::type setGamma(const std::vector&) { } /** Sets the layer "beta" values. */ template - typename std::enable_if::type setBeta(const std::vector& betaVals); + RTNEURAL_REALTIME typename std::enable_if::type setBeta(const std::vector& betaVals); /** Sets the layer "beta" values. */ template - typename std::enable_if::type setBeta(const std::vector&) { } + RTNEURAL_REALTIME typename std::enable_if::type setBeta(const std::vector&) { } /** Sets the layer's trained running mean. */ - void setRunningMean(const std::vector& runningMean); + RTNEURAL_REALTIME void setRunningMean(const std::vector& runningMean); /** Set's the layer's trained running variance. */ - void setRunningVariance(const std::vector& runningVar); + RTNEURAL_REALTIME void setRunningVariance(const std::vector& runningVar); /** Set's the layer "epsilon" value. */ - void setEpsilon(T epsilon); + RTNEURAL_REALTIME void setEpsilon(T epsilon); T outs alignas(RTNEURAL_DEFAULT_ALIGNMENT)[out_size]; diff --git a/RTNeural/batchnorm/batchnorm2d_eigen.h b/RTNeural/batchnorm/batchnorm2d_eigen.h index e3ee59d..6d17723 100644 --- a/RTNeural/batchnorm/batchnorm2d_eigen.h +++ b/RTNeural/batchnorm/batchnorm2d_eigen.h @@ -2,6 +2,7 @@ #define BATCHNORM2DEIGEN_H_INCLUDED #include "../Layer.h" +#include "../config.h" #include namespace RTNEURAL_NAMESPACE @@ -17,7 +18,7 @@ class BatchNorm2DLayer final : public Layer std::string getName() const noexcept override { return "batchnorm2d"; } /** Performs forward propagation for this layer. 
*/ - inline void forward(const T* input, T* out) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* out) noexcept override { auto inMat = Eigen::Map, RTNeuralEigenAlignment>( input, num_filters, num_features); @@ -33,19 +34,19 @@ class BatchNorm2DLayer final : public Layer } /** Sets the layer "gamma" values. */ - void setGamma(const std::vector& gammaVals); + RTNEURAL_REALTIME void setGamma(const std::vector& gammaVals); /** Sets the layer "beta" values. */ - void setBeta(const std::vector& betaVals); + RTNEURAL_REALTIME void setBeta(const std::vector& betaVals); /** Sets the layer's trained running mean. */ - void setRunningMean(const std::vector& runningMean); + RTNEURAL_REALTIME void setRunningMean(const std::vector& runningMean); /** Set's the layer's trained running variance. */ - void setRunningVariance(const std::vector& runningVar); + RTNEURAL_REALTIME void setRunningVariance(const std::vector& runningVar); /** Set's the layer "epsilon" value. */ - void setEpsilon(T epsilon); + RTNEURAL_REALTIME void setEpsilon(T epsilon); private: void updateMultiplier(); @@ -84,11 +85,11 @@ class BatchNorm2DT constexpr bool isActivation() const noexcept { return false; } /** Resets the layer state. */ - void reset() { } + RTNEURAL_REALTIME void reset() { } /** Performs forward propagation for this layer. */ template - inline typename std::enable_if::type + RTNEURAL_REALTIME inline typename std::enable_if::type forward(const Eigen::Vector& ins) noexcept { auto inMat = Eigen::Map, RTNeuralEigenAlignment>(ins.data()); @@ -102,7 +103,7 @@ class BatchNorm2DT /** Performs forward propagation for this layer. */ template - inline typename std::enable_if::type + RTNEURAL_REALTIME inline typename std::enable_if::type forward(const Eigen::Vector& ins) noexcept { auto inMat = Eigen::Map, RTNeuralEigenAlignment>(ins.data()); @@ -116,28 +117,28 @@ class BatchNorm2DT /** Sets the layer "gamma" values. */ template - typename std::enable_if::type setGamma(const std::vector& gammaVals); + RTNEURAL_REALTIME typename std::enable_if::type setGamma(const std::vector& gammaVals); /** Sets the layer "gamma" values. */ template - typename std::enable_if::type setGamma(const std::vector&) { } + RTNEURAL_REALTIME typename std::enable_if::type setGamma(const std::vector&) { } /** Sets the layer "beta" values. */ template - typename std::enable_if::type setBeta(const std::vector& betaVals); + RTNEURAL_REALTIME typename std::enable_if::type setBeta(const std::vector& betaVals); /** Sets the layer "beta" values. */ template - typename std::enable_if::type setBeta(const std::vector&) { } + RTNEURAL_REALTIME typename std::enable_if::type setBeta(const std::vector&) { } /** Sets the layer's trained running mean. */ - void setRunningMean(const std::vector& runningMean); + RTNEURAL_REALTIME void setRunningMean(const std::vector& runningMean); /** Set's the layer's trained running variance. */ - void setRunningVariance(const std::vector& runningVar); + RTNEURAL_REALTIME void setRunningVariance(const std::vector& runningVar); /** Set's the layer "epsilon" value. 
*/ - void setEpsilon(T epsilon); + RTNEURAL_REALTIME void setEpsilon(T epsilon); Eigen::Map, RTNeuralEigenAlignment> outs; diff --git a/RTNeural/batchnorm/batchnorm2d_xsimd.h b/RTNeural/batchnorm/batchnorm2d_xsimd.h index edb35d5..5613e71 100644 --- a/RTNeural/batchnorm/batchnorm2d_xsimd.h +++ b/RTNeural/batchnorm/batchnorm2d_xsimd.h @@ -2,6 +2,7 @@ #define RTNEURAL_BATCHNORM2D_XSIMD_H #include "../Layer.h" +#include "../config.h" #include namespace RTNEURAL_NAMESPACE @@ -17,7 +18,7 @@ class BatchNorm2DLayer final : public Layer std::string getName() const noexcept override { return "batchnorm2d"; } /** Performs forward propagation for this layer. */ - inline void forward(const T* input, T* out) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* out) noexcept override { for(int i = 0; i < num_features; i++) { @@ -36,19 +37,19 @@ class BatchNorm2DLayer final : public Layer } /** Sets the layer "gamma" values. */ - void setGamma(const std::vector& gammaVals); + RTNEURAL_REALTIME void setGamma(const std::vector& gammaVals); /** Sets the layer "beta" values. */ - void setBeta(const std::vector& betaVals); + RTNEURAL_REALTIME void setBeta(const std::vector& betaVals); /** Sets the layer's trained running mean. */ - void setRunningMean(const std::vector& runningMean); + RTNEURAL_REALTIME void setRunningMean(const std::vector& runningMean); /** Set's the layer's trained running variance. */ - void setRunningVariance(const std::vector& runningVar); + RTNEURAL_REALTIME void setRunningVariance(const std::vector& runningVar); /** Set's the layer "epsilon" value. */ - void setEpsilon(T epsilon); + RTNEURAL_REALTIME void setEpsilon(T epsilon); private: void updateMultiplier(); @@ -94,11 +95,11 @@ class BatchNorm2DT constexpr bool isActivation() const noexcept { return false; } /** Resets the layer state. */ - void reset() { } + RTNEURAL_REALTIME void reset() { } /** Performs forward propagation for this layer. */ template - inline typename std::enable_if::type + RTNEURAL_REALTIME inline typename std::enable_if::type forward(const v_type (&ins)[v_io_size]) noexcept { for(int i = 0; i < num_features; i++) @@ -112,7 +113,7 @@ class BatchNorm2DT /** Performs forward propagation for this layer. */ template - inline typename std::enable_if::type + RTNEURAL_REALTIME inline typename std::enable_if::type forward(const v_type (&ins)[v_io_size]) noexcept { for(int i = 0; i < num_features; i++) @@ -126,28 +127,28 @@ class BatchNorm2DT /** Sets the layer "gamma" values. */ template - typename std::enable_if::type setGamma(const std::vector& gammaVals); + RTNEURAL_REALTIME typename std::enable_if::type setGamma(const std::vector& gammaVals); /** Sets the layer "gamma" values. */ template - typename std::enable_if::type setGamma(const std::vector&) { } + RTNEURAL_REALTIME typename std::enable_if::type setGamma(const std::vector&) { } /** Sets the layer "beta" values. */ template - typename std::enable_if::type setBeta(const std::vector& betaVals); + RTNEURAL_REALTIME typename std::enable_if::type setBeta(const std::vector& betaVals); /** Sets the layer "beta" values. */ template - typename std::enable_if::type setBeta(const std::vector&) { } + RTNEURAL_REALTIME typename std::enable_if::type setBeta(const std::vector&) { } /** Sets the layer's trained running mean. */ - void setRunningMean(const std::vector& runningMean); + RTNEURAL_REALTIME void setRunningMean(const std::vector& runningMean); /** Set's the layer's trained running variance. 
*/ - void setRunningVariance(const std::vector& runningVar); + RTNEURAL_REALTIME void setRunningVariance(const std::vector& runningVar); /** Set's the layer "epsilon" value. */ - void setEpsilon(T epsilon); + RTNEURAL_REALTIME void setEpsilon(T epsilon); v_type outs[v_io_size]; diff --git a/RTNeural/batchnorm/batchnorm_eigen.h b/RTNeural/batchnorm/batchnorm_eigen.h index e1597d5..a8d42ec 100644 --- a/RTNeural/batchnorm/batchnorm_eigen.h +++ b/RTNeural/batchnorm/batchnorm_eigen.h @@ -2,6 +2,7 @@ #define BATCHNORMEIGEN_H_INCLUDED #include "../Layer.h" +#include "../config.h" #include namespace RTNEURAL_NAMESPACE @@ -17,7 +18,7 @@ class BatchNorm1DLayer final : public Layer std::string getName() const noexcept override { return "batchnorm"; } /** Performs forward propagation for this layer. */ - inline void forward(const T* input, T* out) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* out) noexcept override { auto inVec = Eigen::Map, RTNeuralEigenAlignment>( input, Layer::in_size, 1); @@ -29,19 +30,19 @@ class BatchNorm1DLayer final : public Layer } /** Sets the layer "gamma" values. */ - void setGamma(const std::vector& gammaVals); + RTNEURAL_REALTIME void setGamma(const std::vector& gammaVals); /** Sets the layer "beta" values. */ - void setBeta(const std::vector& betaVals); + RTNEURAL_REALTIME void setBeta(const std::vector& betaVals); /** Sets the layer's trained running mean. */ - void setRunningMean(const std::vector& runningMean); + RTNEURAL_REALTIME void setRunningMean(const std::vector& runningMean); /** Set's the layer's trained running variance. */ - void setRunningVariance(const std::vector& runningVar); + RTNEURAL_REALTIME void setRunningVariance(const std::vector& runningVar); /** Set's the layer "epsilon" value. */ - void setEpsilon(T epsilon); + RTNEURAL_REALTIME void setEpsilon(T epsilon); private: void updateMultiplier(); @@ -75,11 +76,11 @@ class BatchNorm1DT constexpr bool isActivation() const noexcept { return false; } /** Resets the layer state. */ - void reset() { } + RTNEURAL_REALTIME void reset() { } /** Performs forward propagation for this layer. */ template - inline typename std::enable_if::type + RTNEURAL_REALTIME inline typename std::enable_if::type forward(const Eigen::Matrix& ins) noexcept { outs = multiplier.cwiseProduct(ins - running_mean) + beta; @@ -87,7 +88,7 @@ class BatchNorm1DT /** Performs forward propagation for this layer. */ template - inline typename std::enable_if::type + RTNEURAL_REALTIME inline typename std::enable_if::type forward(const Eigen::Matrix& ins) noexcept { outs = multiplier.cwiseProduct(ins - running_mean); @@ -95,28 +96,28 @@ class BatchNorm1DT /** Sets the layer "gamma" values. */ template - typename std::enable_if::type setGamma(const std::vector& gammaVals); + RTNEURAL_REALTIME typename std::enable_if::type setGamma(const std::vector& gammaVals); /** Sets the layer "gamma" values. */ template - typename std::enable_if::type setGamma(const std::vector&) { } + RTNEURAL_REALTIME typename std::enable_if::type setGamma(const std::vector&) { } /** Sets the layer "beta" values. */ template - typename std::enable_if::type setBeta(const std::vector& betaVals); + RTNEURAL_REALTIME typename std::enable_if::type setBeta(const std::vector& betaVals); /** Sets the layer "beta" values. */ template - typename std::enable_if::type setBeta(const std::vector&) { } + RTNEURAL_REALTIME typename std::enable_if::type setBeta(const std::vector&) { } /** Sets the layer's trained running mean. 
*/ - void setRunningMean(const std::vector& runningMean); + RTNEURAL_REALTIME void setRunningMean(const std::vector& runningMean); /** Set's the layer's trained running variance. */ - void setRunningVariance(const std::vector& runningVar); + RTNEURAL_REALTIME void setRunningVariance(const std::vector& runningVar); /** Set's the layer "epsilon" value. */ - void setEpsilon(T epsilon); + RTNEURAL_REALTIME void setEpsilon(T epsilon); Eigen::Map, RTNeuralEigenAlignment> outs; diff --git a/RTNeural/batchnorm/batchnorm_xsimd.h b/RTNeural/batchnorm/batchnorm_xsimd.h index c709bad..cd20737 100644 --- a/RTNeural/batchnorm/batchnorm_xsimd.h +++ b/RTNeural/batchnorm/batchnorm_xsimd.h @@ -2,6 +2,7 @@ #define BATCHNORMXSIMD_H_INCLUDED #include "../Layer.h" +#include "../config.h" #include namespace RTNEURAL_NAMESPACE @@ -17,7 +18,7 @@ class BatchNorm1DLayer final : public Layer std::string getName() const noexcept override { return "batchnorm"; } /** Performs forward propagation for this layer. */ - inline void forward(const T* input, T* out) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* out) noexcept override { xsimd::transform(input, input + Layer::in_size, running_mean.begin(), out, [](auto const& a, auto const& b) @@ -31,19 +32,19 @@ class BatchNorm1DLayer final : public Layer } /** Sets the layer "gamma" values. */ - void setGamma(const std::vector& gammaVals); + RTNEURAL_REALTIME void setGamma(const std::vector& gammaVals); /** Sets the layer "beta" values. */ - void setBeta(const std::vector& betaVals); + RTNEURAL_REALTIME void setBeta(const std::vector& betaVals); /** Sets the layer's trained running mean. */ - void setRunningMean(const std::vector& runningMean); + RTNEURAL_REALTIME void setRunningMean(const std::vector& runningMean); /** Set's the layer's trained running variance. */ - void setRunningVariance(const std::vector& runningVar); + RTNEURAL_REALTIME void setRunningVariance(const std::vector& runningVar); /** Set's the layer "epsilon" value. */ - void setEpsilon(T epsilon); + RTNEURAL_REALTIME void setEpsilon(T epsilon); private: void updateMultiplier(); @@ -83,11 +84,11 @@ class BatchNorm1DT constexpr bool isActivation() const noexcept { return false; } /** Resets the layer state. */ - void reset() { } + RTNEURAL_REALTIME void reset() { } /** Performs forward propagation for this layer. */ template - inline typename std::enable_if::type + RTNEURAL_REALTIME inline typename std::enable_if::type forward(const v_type (&ins)[v_out_size]) noexcept { for(int k = 0; k < v_out_size; ++k) @@ -96,7 +97,7 @@ class BatchNorm1DT /** Performs forward propagation for this layer. */ template - inline typename std::enable_if::type + RTNEURAL_REALTIME inline typename std::enable_if::type forward(const v_type (&ins)[v_out_size]) noexcept { for(int k = 0; k < v_out_size; ++k) @@ -105,28 +106,28 @@ class BatchNorm1DT /** Sets the layer "gamma" values. */ template - typename std::enable_if::type setGamma(const std::vector& gammaVals); + RTNEURAL_REALTIME typename std::enable_if::type setGamma(const std::vector& gammaVals); /** Sets the layer "gamma" values. */ template - typename std::enable_if::type setGamma(const std::vector&) { } + RTNEURAL_REALTIME typename std::enable_if::type setGamma(const std::vector&) { } /** Sets the layer "beta" values. */ template - typename std::enable_if::type setBeta(const std::vector& betaVals); + RTNEURAL_REALTIME typename std::enable_if::type setBeta(const std::vector& betaVals); /** Sets the layer "beta" values. 
*/ template - typename std::enable_if::type setBeta(const std::vector&) { } + RTNEURAL_REALTIME typename std::enable_if::type setBeta(const std::vector&) { } /** Sets the layer's trained running mean. */ - void setRunningMean(const std::vector& runningMean); + RTNEURAL_REALTIME void setRunningMean(const std::vector& runningMean); /** Set's the layer's trained running variance. */ - void setRunningVariance(const std::vector& runningVar); + RTNEURAL_REALTIME void setRunningVariance(const std::vector& runningVar); /** Set's the layer "epsilon" value. */ - void setEpsilon(T epsilon); + RTNEURAL_REALTIME void setEpsilon(T epsilon); v_type outs[v_out_size]; diff --git a/RTNeural/config.h b/RTNeural/config.h new file mode 100644 index 0000000..5b8ad26 --- /dev/null +++ b/RTNeural/config.h @@ -0,0 +1,37 @@ +#pragma once + +#ifndef RTNEURAL_NAMESPACE +#define RTNEURAL_NAMESPACE RTNeural +#endif + +#ifndef RTNEURAL_DEFAULT_ALIGNMENT +#if _MSC_VER +#pragma message("RTNEURAL_DEFAULT_ALIGNMENT was not defined! Using default alignment = 16.") +#else +#warning "RTNEURAL_DEFAULT_ALIGNMENT was not defined! Using default alignment = 16." +#endif +#define RTNEURAL_DEFAULT_ALIGNMENT 16 +#endif + +/** + Facilitate testing real-time safety with RealtimeSanitizer (RADSan) + + For more information, see https://github.com/realtime-sanitizer/radsan. + The `[[clang::realtime]]` attribute is unique to a RADSan-modified + version of clang, and its appearance will result in an error for other + compilers. Here, we make its presence configurable. RealtimeSanitizer is + very early stage, and this configuration may change. + + This real-time safety checking is designed to function mostly in CI. If you + wish to test it locally on your dev machine, you'll need to either: + + i) use the RADSan clang Docker image (recommended), or + ii) get the RADSan clang compiler, + + for which instructions may be found in the RADSan repository above. +*/ +#ifdef RTNEURAL_RADSAN_ENABLED + #define RTNEURAL_REALTIME [[clang::realtime]] +#else + #define RTNEURAL_REALTIME +#endif diff --git a/RTNeural/conv1d/conv1d.h b/RTNeural/conv1d/conv1d.h index 7980b6c..253e4c7 100644 --- a/RTNeural/conv1d/conv1d.h +++ b/RTNeural/conv1d/conv1d.h @@ -10,6 +10,7 @@ #else #include "../Layer.h" #include "../common.h" +#include "../config.h" #include namespace RTNEURAL_NAMESPACE @@ -44,13 +45,13 @@ class Conv1D final : public Layer virtual ~Conv1D(); /** Resets the layer state. */ - void reset() override; + RTNEURAL_REALTIME void reset() override; /** Returns the name of this layer. */ std::string getName() const noexcept override { return "conv1d"; } /** Performs forward propagation for this layer. */ - inline void forward(const T* input, T* h) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* h) noexcept override { // insert input into a circular buffer std::copy(input, input + Layer::in_size, state[state_ptr]); @@ -112,20 +113,20 @@ class Conv1D final : public Layer * * The weights vector must have size weights[out_size][in_size][kernel_size * dilation] */ - void setWeights(const std::vector>>& weights); + RTNEURAL_REALTIME void setWeights(const std::vector>>& weights); /** * Sets the layer biases. * * The bias vector must have size bias[out_size] */ - void setBias(const std::vector& biasVals); + RTNEURAL_REALTIME void setBias(const std::vector& biasVals); /** Returns the size of the convolution kernel. 
*/ - int getKernelSize() const noexcept { return kernel_size; } + RTNEURAL_REALTIME int getKernelSize() const noexcept { return kernel_size; } /** Returns the convolution dilation rate. */ - int getDilationRate() const noexcept { return dilation_rate; } + RTNEURAL_REALTIME int getDilationRate() const noexcept { return dilation_rate; } /** Returns the number of "groups" in the convolution. */ int getGroups() const noexcept { return groups; } @@ -195,11 +196,11 @@ class Conv1DT constexpr bool isActivation() const noexcept { return false; } /** Resets the layer state. */ - void reset(); + RTNEURAL_REALTIME void reset(); template = true> /** Performs forward propagation for this layer. */ - inline void forward(const T (&ins)[in_size]) noexcept + RTNEURAL_REALTIME inline void forward(const T (&ins)[in_size]) noexcept { // insert input into a circular buffer std::copy(std::begin(ins), std::end(ins), state[state_ptr].begin()); @@ -269,20 +270,20 @@ class Conv1DT * * The weights vector must have size weights[out_size][group_count][kernel_size * dilation] */ - void setWeights(const std::vector>>& weights); + RTNEURAL_REALTIME void setWeights(const std::vector>>& weights); /** * Sets the layer biases. * * The bias vector must have size bias[out_size] */ - void setBias(const std::vector& biasVals); + RTNEURAL_REALTIME void setBias(const std::vector& biasVals); /** Returns the size of the convolution kernel. */ - int getKernelSize() const noexcept { return kernel_size; } + RTNEURAL_REALTIME int getKernelSize() const noexcept { return kernel_size; } /** Returns the convolution dilation rate. */ - int getDilationRate() const noexcept { return dilation_rate; } + RTNEURAL_REALTIME int getDilationRate() const noexcept { return dilation_rate; } /** Returns the number of "groups" in the convolution. */ int getGroups() const noexcept { return groups; } diff --git a/RTNeural/conv1d/conv1d_eigen.h b/RTNeural/conv1d/conv1d_eigen.h index 9c7dede..2b3b095 100644 --- a/RTNeural/conv1d/conv1d_eigen.h +++ b/RTNeural/conv1d/conv1d_eigen.h @@ -2,6 +2,7 @@ #define CONV1DEIGEN_H_INCLUDED #include "../Layer.h" +#include "../config.h" #include namespace RTNEURAL_NAMESPACE @@ -36,13 +37,13 @@ class Conv1D : public Layer virtual ~Conv1D(); /** Resets the layer state. */ - void reset() override; + RTNEURAL_REALTIME void reset() override; /** Returns the name of this layer. */ std::string getName() const noexcept override { return "conv1d"; } /** Performs forward propagation for this layer. */ - inline void forward(const T* input, T* h) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* h) noexcept override { // insert input into a circular buffer state.col(state_ptr) = Eigen::Map, @@ -83,20 +84,20 @@ class Conv1D : public Layer * * The weights vector must have size weights[out_size][in_size][kernel_size * dilation] */ - void setWeights(const std::vector>>& weights); + RTNEURAL_REALTIME void setWeights(const std::vector>>& weights); /** * Sets the layer biases. * * The bias vector must have size bias[out_size] */ - void setBias(const std::vector& biasVals); + RTNEURAL_REALTIME void setBias(const std::vector& biasVals); /** Returns the size of the convolution kernel. */ - int getKernelSize() const noexcept { return kernel_size; } + RTNEURAL_REALTIME int getKernelSize() const noexcept { return kernel_size; } /** Returns the convolution dilation rate. 
*/ - int getDilationRate() const noexcept { return dilation_rate; } + RTNEURAL_REALTIME int getDilationRate() const noexcept { return dilation_rate; } /** Returns the number of "groups" in the convolution. */ int getGroups() const noexcept { return groups; } @@ -171,11 +172,11 @@ class Conv1DT constexpr bool isActivation() const noexcept { return false; } /** Resets the layer state. */ - void reset(); + RTNEURAL_REALTIME void reset(); /** Performs forward propagation for this layer. */ template = true> - inline void forward(const Eigen::Matrix& ins) noexcept + RTNEURAL_REALTIME inline void forward(const Eigen::Matrix& ins) noexcept { // insert input into a circular buffer state.col(state_ptr) = ins; @@ -196,7 +197,7 @@ class Conv1DT /** Performs forward propagation for this layer (groups > 1). */ template = true> - inline void forward(const Eigen::Matrix& ins) noexcept + RTNEURAL_REALTIME inline void forward(const Eigen::Matrix& ins) noexcept { // insert input into a circular buffer state.col(state_ptr) = ins; @@ -223,20 +224,20 @@ class Conv1DT * * The weights vector must have size weights[out_size][in_size][kernel_size * dilation] */ - void setWeights(const std::vector>>& weights); + RTNEURAL_REALTIME void setWeights(const std::vector>>& weights); /** * Sets the layer biases. * * The bias vector must have size bias[out_size] */ - void setBias(const std::vector& biasVals); + RTNEURAL_REALTIME void setBias(const std::vector& biasVals); /** Returns the size of the convolution kernel. */ - int getKernelSize() const noexcept { return kernel_size; } + RTNEURAL_REALTIME int getKernelSize() const noexcept { return kernel_size; } /** Returns the convolution dilation rate. */ - int getDilationRate() const noexcept { return dilation_rate; } + RTNEURAL_REALTIME int getDilationRate() const noexcept { return dilation_rate; } /** Returns the number of "groups" in the convolution. */ int getGroups() const noexcept { return groups; } diff --git a/RTNeural/conv1d/conv1d_xsimd.h b/RTNeural/conv1d/conv1d_xsimd.h index 077e8bb..89c6fb8 100644 --- a/RTNeural/conv1d/conv1d_xsimd.h +++ b/RTNeural/conv1d/conv1d_xsimd.h @@ -3,6 +3,7 @@ #include "../Layer.h" #include "../common.h" +#include "../config.h" #include #include #include @@ -39,13 +40,13 @@ class Conv1D : public Layer virtual ~Conv1D(); /** Resets the layer state. */ - void reset() override; + RTNEURAL_REALTIME void reset() override; /** Returns the name of this layer. */ std::string getName() const noexcept override { return "conv1d"; } /** Performs forward propagation for this layer. */ - inline void forward(const T* input, T* h) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* h) noexcept override { // insert input into a circular buffer vCopy(input, state[state_ptr].data(), Layer::in_size); @@ -98,20 +99,20 @@ class Conv1D : public Layer * * The weights vector must have size weights[out_size][in_size][kernel_size * dilation] */ - void setWeights(const std::vector>>& weights); + RTNEURAL_REALTIME void setWeights(const std::vector>>& weights); /** * Sets the layer biases. * * The bias vector must have size bias[out_size] */ - void setBias(const std::vector& biasVals); + RTNEURAL_REALTIME void setBias(const std::vector& biasVals); /** Returns the size of the convolution kernel. */ - int getKernelSize() const noexcept { return kernel_size; } + RTNEURAL_REALTIME int getKernelSize() const noexcept { return kernel_size; } /** Returns the convolution dilation rate. 
*/ - int getDilationRate() const noexcept { return dilation_rate; } + RTNEURAL_REALTIME int getDilationRate() const noexcept { return dilation_rate; } /** Returns the number of "groups" in the convolution. */ int getGroups() const noexcept { return groups; } @@ -191,11 +192,11 @@ class Conv1DT constexpr bool isActivation() const noexcept { return false; } /** Resets the layer state. */ - void reset(); + RTNEURAL_REALTIME void reset(); /** Performs forward propagation for this layer. */ template - inline typename std::enable_if<(G > 1), void>::type + RTNEURAL_REALTIME inline typename std::enable_if<(G > 1), void>::type forward(const v_type (&ins)[v_in_size]) noexcept { // insert input into a circular buffer @@ -241,7 +242,7 @@ class Conv1DT /** Performs forward propagation for this layer. */ template - inline typename std::enable_if<(DR > 1 && G == 1), void>::type + RTNEURAL_REALTIME inline typename std::enable_if<(DR > 1 && G == 1), void>::type forward(const v_type (&ins)[v_in_size]) noexcept { // insert input into a circular buffer @@ -285,7 +286,7 @@ class Conv1DT /** Performs forward propagation for this layer. */ template - inline typename std::enable_if<(DR == 1 && KS > 1 && G == 1), void>::type + RTNEURAL_REALTIME inline typename std::enable_if<(DR == 1 && KS > 1 && G == 1), void>::type forward(const v_type (&ins)[v_in_size]) noexcept { // insert input into a circular buffer @@ -321,7 +322,7 @@ class Conv1DT /** Performs forward propagation for this layer. */ template - inline typename std::enable_if::type + RTNEURAL_REALTIME inline typename std::enable_if::type forward(const v_type (&ins)[v_in_size]) noexcept { for(int i = 0; i < v_out_size; ++i) @@ -346,20 +347,20 @@ class Conv1DT * * The weights vector must have size weights[out_size][in_size][kernel_size * dilation] */ - void setWeights(const std::vector>>& weights); + RTNEURAL_REALTIME void setWeights(const std::vector>>& weights); /** * Sets the layer biases. * * The bias vector must have size bias[out_size] */ - void setBias(const std::vector& biasVals); + RTNEURAL_REALTIME void setBias(const std::vector& biasVals); /** Returns the size of the convolution kernel. */ - int getKernelSize() const noexcept { return kernel_size; } + RTNEURAL_REALTIME int getKernelSize() const noexcept { return kernel_size; } /** Returns the convolution dilation rate. */ - int getDilationRate() const noexcept { return dilation_rate; } + RTNEURAL_REALTIME int getDilationRate() const noexcept { return dilation_rate; } /** Returns the number of "groups" in the convolution. */ int getGroups() const noexcept { return groups; } diff --git a/RTNeural/conv1d_stateless/conv1d_stateless.h b/RTNeural/conv1d_stateless/conv1d_stateless.h index b15d135..9252852 100644 --- a/RTNeural/conv1d_stateless/conv1d_stateless.h +++ b/RTNeural/conv1d_stateless/conv1d_stateless.h @@ -9,6 +9,7 @@ #include "conv1d_stateless_xsimd.tpp" #else #include "../Layer.h" +#include "../config.h" namespace RTNEURAL_NAMESPACE { @@ -74,7 +75,7 @@ class Conv1DStateless : public Layer constexpr bool isActivation() const noexcept { return false; } /** Performs forward propagation for this layer. 
*/ - inline void forward(const T* input, T* output) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* output) noexcept override { if(valid_pad) { @@ -145,13 +146,13 @@ class Conv1DStateless : public Layer * * The weights vector must have size weights[num_filters_out][num_filters_in][kernel_size] */ - void setWeights(const std::vector>>& inWeights); + RTNEURAL_REALTIME void setWeights(const std::vector>>& inWeights); /** Returns the size of the convolution kernel. */ - int getKernelSize() const noexcept { return kernel_size; } + RTNEURAL_REALTIME int getKernelSize() const noexcept { return kernel_size; } /** Returns the stride. */ - int getStride() const noexcept { return stride; } + RTNEURAL_REALTIME int getStride() const noexcept { return stride; } private: const int num_filters_in; @@ -203,11 +204,11 @@ class Conv1DStatelessT constexpr bool isActivation() const noexcept { return false; } /** Empty function, this layer has no state */ - void reset() {}; + RTNEURAL_REALTIME void reset() {}; /** Performs forward propagation for this layer if pad is "valid". */ template - inline typename std::enable_if::type + RTNEURAL_REALTIME inline typename std::enable_if::type forward(const T (&inMatrix)[num_features_in_t * num_filters_in_t]) noexcept { for(int out_row_idx = 0; out_row_idx < num_filters_out_t; ++out_row_idx) @@ -228,7 +229,7 @@ class Conv1DStatelessT /** Performs forward propagation for this layer if pad is "same" */ template - inline typename std::enable_if::type + RTNEURAL_REALTIME inline typename std::enable_if::type forward(const T (&inMatrix)[num_features_in_t * num_filters_in_t]) noexcept { for(int out_row_idx = 0; out_row_idx < num_filters_out_t; ++out_row_idx) @@ -280,20 +281,20 @@ class Conv1DStatelessT * * The weights vector must have size weights[num_filters_out][num_filters_in][kernel_size] */ - void setWeights(const std::vector>>& inWeights); + RTNEURAL_REALTIME void setWeights(const std::vector>>& inWeights); /** * Sets the layer weights. * * The weights vector must have size weights[kernel_size][num_filters_in][num_filters_out] */ - void setWeightsTransposed(const std::vector>>& inWeights); + RTNEURAL_REALTIME void setWeightsTransposed(const std::vector>>& inWeights); /** Returns the size of the convolution kernel. */ - int getKernelSize() const noexcept { return kernel_size_t; } + RTNEURAL_REALTIME int getKernelSize() const noexcept { return kernel_size_t; } /** Returns the convolution dilation rate. */ - int getStride() const noexcept { return stride_t; } + RTNEURAL_REALTIME int getStride() const noexcept { return stride_t; } T outs alignas(RTNEURAL_DEFAULT_ALIGNMENT)[num_filters_out_t * num_features_out] {}; diff --git a/RTNeural/conv1d_stateless/conv1d_stateless_eigen.h b/RTNeural/conv1d_stateless/conv1d_stateless_eigen.h index 8bed159..db2929f 100644 --- a/RTNeural/conv1d_stateless/conv1d_stateless_eigen.h +++ b/RTNeural/conv1d_stateless/conv1d_stateless_eigen.h @@ -3,6 +3,7 @@ #include "../Layer.h" #include "../common.h" +#include "../config.h" #include namespace RTNEURAL_NAMESPACE @@ -60,7 +61,7 @@ class Conv1DStateless : public Layer } /** Resets the layer state. */ - void reset() override {}; + RTNEURAL_REALTIME void reset() override {}; /** Returns the name of this layer. */ std::string getName() const noexcept override { return "conv1d_stateless"; } @@ -69,7 +70,7 @@ class Conv1DStateless : public Layer constexpr bool isActivation() const noexcept { return false; } /** Performs forward propagation for this layer. 
*/ - inline void forward(const T* input, T* output) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* output) noexcept override { auto inMatrix = Eigen::Map, RTNeuralEigenAlignment>(input, num_filters_in, num_features_in); @@ -112,13 +113,13 @@ class Conv1DStateless : public Layer * * The weights vector must have size weights[num_filters_out][num_filters_in][kernel_size] */ - void setWeights(const std::vector>>& inWeights); + RTNEURAL_REALTIME void setWeights(const std::vector>>& inWeights); /** Returns the size of the convolution kernel. */ - int getKernelSize() const noexcept { return kernel_size; } + RTNEURAL_REALTIME int getKernelSize() const noexcept { return kernel_size; } /** Returns the stride. */ - int getStride() const noexcept { return stride; } + RTNEURAL_REALTIME int getStride() const noexcept { return stride; } private: const int num_filters_in; @@ -171,11 +172,11 @@ class Conv1DStatelessT constexpr bool isActivation() const noexcept { return false; } /** Empty function, this layer has no state */ - void reset() {}; + RTNEURAL_REALTIME void reset() {}; /** Performs forward propagation for this layer if pad is "valid". */ template - inline typename std::enable_if::type + RTNEURAL_REALTIME inline typename std::enable_if::type forward(const input_type& inMatrix) noexcept { // perform a multichannel convolution @@ -191,7 +192,7 @@ class Conv1DStatelessT /** Performs forward propagation for this layer if pad is "same" */ template - inline typename std::enable_if::type + RTNEURAL_REALTIME inline typename std::enable_if::type forward(const input_type& inMatrix) noexcept { // perform a multichannel convolution @@ -222,20 +223,20 @@ class Conv1DStatelessT * * The weights vector must have size weights[num_filters_out][num_filters_in][kernel_size] */ - void setWeights(const std::vector>>& inWeights); + RTNEURAL_REALTIME void setWeights(const std::vector>>& inWeights); /** * Sets the layer weights. * * The weights vector must have size weights[kernel_size][num_filters_in][num_filters_out] */ - void setWeightsTransposed(const std::vector>>& inWeights); + RTNEURAL_REALTIME void setWeightsTransposed(const std::vector>>& inWeights); /** Returns the size of the convolution kernel. */ - int getKernelSize() const noexcept { return kernel_size_t; } + RTNEURAL_REALTIME int getKernelSize() const noexcept { return kernel_size_t; } /** Returns the convolution dilation rate. */ - int getStride() const noexcept { return stride_t; } + RTNEURAL_REALTIME int getStride() const noexcept { return stride_t; } Eigen::Map outs; diff --git a/RTNeural/conv1d_stateless/conv1d_stateless_xsimd.h b/RTNeural/conv1d_stateless/conv1d_stateless_xsimd.h index 7e34f0e..d43ab7a 100644 --- a/RTNeural/conv1d_stateless/conv1d_stateless_xsimd.h +++ b/RTNeural/conv1d_stateless/conv1d_stateless_xsimd.h @@ -2,6 +2,7 @@ #define RTNEURAL_CONV1D_STATELESS_XSIMD_H #include "../Layer.h" +#include "../config.h" #include namespace RTNEURAL_NAMESPACE @@ -59,7 +60,7 @@ class Conv1DStateless : public Layer } /** Resets the layer state. */ - void reset() override { } + RTNEURAL_REALTIME void reset() override { } /** Returns the name of this layer. */ std::string getName() const noexcept override { return "conv1d_stateless"; } @@ -68,7 +69,7 @@ class Conv1DStateless : public Layer constexpr bool isActivation() const noexcept { return false; } /** Performs forward propagation for this layer. 
*/ - inline void forward(const T* input, T* output) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* output) noexcept override { if(valid_pad) { @@ -159,13 +160,13 @@ class Conv1DStateless : public Layer * * The weights vector must have size weights[num_filters_out][num_filters_in][kernel_size] */ - void setWeights(const std::vector>>& inWeights); + RTNEURAL_REALTIME void setWeights(const std::vector>>& inWeights); /** Returns the size of the convolution kernel. */ - int getKernelSize() const noexcept { return kernel_size; } + RTNEURAL_REALTIME int getKernelSize() const noexcept { return kernel_size; } /** Returns the stride. */ - int getStride() const noexcept { return stride; } + RTNEURAL_REALTIME int getStride() const noexcept { return stride; } private: const int num_filters_in; @@ -226,11 +227,11 @@ class Conv1DStatelessT constexpr bool isActivation() const noexcept { return false; } /** Empty function, this layer has no state */ - void reset() { } + RTNEURAL_REALTIME void reset() { } /** Performs forward propagation for this layer if pad is "valid". */ template - inline typename std::enable_if::type + RTNEURAL_REALTIME inline typename std::enable_if::type forward(const v_type (&inMatrix)[v_in_size]) noexcept { // @TODO: can we vectorize in the other direction if num_filters == 1? @@ -257,7 +258,7 @@ class Conv1DStatelessT /** Performs forward propagation for this layer if pad is "same" */ template - inline typename std::enable_if::type + RTNEURAL_REALTIME inline typename std::enable_if::type forward(const v_type (&inMatrix)[v_in_size]) noexcept { int out_col_idx = 0; @@ -330,13 +331,13 @@ class Conv1DStatelessT * * The weights vector must have size weights[num_filters_out][num_filters_in][kernel_size] */ - void setWeights(const std::vector>>& inWeights); + RTNEURAL_REALTIME void setWeights(const std::vector>>& inWeights); /** Returns the size of the convolution kernel. */ - int getKernelSize() const noexcept { return kernel_size_t; } + RTNEURAL_REALTIME int getKernelSize() const noexcept { return kernel_size_t; } /** Returns the convolution dilation rate. */ - int getStride() const noexcept { return stride_t; } + RTNEURAL_REALTIME int getStride() const noexcept { return stride_t; } v_type outs[v_out_size]; diff --git a/RTNeural/conv2d/conv2d.h b/RTNeural/conv2d/conv2d.h index fa88b36..1752a5e 100644 --- a/RTNeural/conv2d/conv2d.h +++ b/RTNeural/conv2d/conv2d.h @@ -10,6 +10,7 @@ #else #include "../Layer.h" #include "../common.h" +#include "../config.h" #include "../conv1d_stateless/conv1d_stateless.h" namespace RTNEURAL_NAMESPACE @@ -40,7 +41,7 @@ class Conv2D : public Layer virtual ~Conv2D() = default; /** Reset the layer's state */ - void reset() override + RTNEURAL_REALTIME void reset() override { state_index = 0; @@ -57,7 +58,7 @@ class Conv2D : public Layer constexpr bool isActivation() const noexcept { return false; } /** Performs forward propagation for this layer. */ - inline void forward(const T* input, T* output) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* output) noexcept override { for(int i = 0; i < kernel_size_time; ++i) { @@ -83,26 +84,26 @@ class Conv2D : public Layer * * The weights vector must have size weights[num_filters_out][num_filters_in][kernel_size] */ - void setWeights(const std::vector>>>& inWeights); + RTNEURAL_REALTIME void setWeights(const std::vector>>>& inWeights); /** * Sets the layer biases. 
* * The bias vector must have size bias[num_filters_out] */ - void setBias(const std::vector& inBias); + RTNEURAL_REALTIME void setBias(const std::vector& inBias); /** Returns the size of the convolution kernel (time axis). */ - int getKernelSizeTime() const noexcept { return kernel_size_time; } + RTNEURAL_REALTIME int getKernelSizeTime() const noexcept { return kernel_size_time; } /** Returns the size of the convolution kernel (feature axis). */ - int getKernelSizeFeature() const noexcept { return kernel_size_feature; } + RTNEURAL_REALTIME int getKernelSizeFeature() const noexcept { return kernel_size_feature; } /** Returns the convolution stride (feature axis) */ - int getStride() const noexcept { return stride; } + RTNEURAL_REALTIME int getStride() const noexcept { return stride; } /** Returns the convolution dilation rate (time axis) */ - int getDilationRate() const noexcept { return dilation_rate; } + RTNEURAL_REALTIME int getDilationRate() const noexcept { return dilation_rate; } const int num_filters_in; const int num_features_in; @@ -171,7 +172,7 @@ class Conv2DT constexpr bool isActivation() const noexcept { return false; } /** Reset the layer's state */ - void reset() + RTNEURAL_REALTIME void reset() { state_index = 0; @@ -182,7 +183,7 @@ class Conv2DT }; /** Performs forward propagation for this layer. */ - inline void forward(const T (&ins)[in_size]) noexcept + RTNEURAL_REALTIME inline void forward(const T (&ins)[in_size]) noexcept { for(int i = 0; i < kernel_size_time; ++i) { @@ -212,26 +213,26 @@ class Conv2DT * * The weights vector must have size weights [kernel_size_time][num_filters_out][num_filters_in][kernel_size_feature] */ - void setWeights(const std::vector>>>& inWeights); + RTNEURAL_REALTIME void setWeights(const std::vector>>>& inWeights); /** * Sets the layer biases. * * The bias vector must have size bias[num_filters_out] */ - void setBias(const std::vector& inBias); + RTNEURAL_REALTIME void setBias(const std::vector& inBias); /** Returns the size of the convolution kernel (time axis). */ - int getKernelSizeTime() const noexcept { return kernel_size_time_t; } + RTNEURAL_REALTIME int getKernelSizeTime() const noexcept { return kernel_size_time_t; } /** Returns the size of the convolution kernel (feature axis). 
*/ - int getKernelSizeFeature() const noexcept { return kernel_size_feature_t; } + RTNEURAL_REALTIME int getKernelSizeFeature() const noexcept { return kernel_size_feature_t; } /** Returns the convolution stride */ - int getStride() const noexcept { return stride_t; } + RTNEURAL_REALTIME int getStride() const noexcept { return stride_t; } /** Returns the convolution dilation rate */ - int getDilationRate() const noexcept { return dilation_rate_t; } + RTNEURAL_REALTIME int getDilationRate() const noexcept { return dilation_rate_t; } T outs alignas(RTNEURAL_DEFAULT_ALIGNMENT)[num_filters_out_t * num_features_out]; @@ -250,4 +251,4 @@ class Conv2DT } // RTNEURAL #endif // RTNEURAL_USE_STL -#endif // CONV2D_H_INCLUDED \ No newline at end of file +#endif // CONV2D_H_INCLUDED diff --git a/RTNeural/conv2d/conv2d_eigen.h b/RTNeural/conv2d/conv2d_eigen.h index e03f600..9dd7ed7 100644 --- a/RTNeural/conv2d/conv2d_eigen.h +++ b/RTNeural/conv2d/conv2d_eigen.h @@ -3,6 +3,7 @@ #include "../Layer.h" #include "../common.h" +#include "../config.h" #include "../conv1d_stateless/conv1d_stateless.h" #include @@ -34,7 +35,7 @@ class Conv2D : public Layer virtual ~Conv2D() = default; /** Reset the layer's state */ - void reset() override + RTNEURAL_REALTIME void reset() override { state_index = 0; @@ -51,7 +52,7 @@ class Conv2D : public Layer constexpr bool isActivation() const noexcept { return false; } /** Performs forward propagation for this layer. */ - inline void forward(const T* input, T* output) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* output) noexcept override { auto inMatrix = Eigen::Map, RTNeuralEigenAlignment>(input, num_filters_in, num_features_in); @@ -77,26 +78,26 @@ class Conv2D : public Layer * * The weights vector must have size weights[num_filters_out][num_filters_in][kernel_size] */ - void setWeights(const std::vector>>>& inWeights); + RTNEURAL_REALTIME void setWeights(const std::vector>>>& inWeights); /** * Sets the layer biases. * * The bias vector must have size bias[num_filters_out] */ - void setBias(const std::vector& inBias); + RTNEURAL_REALTIME void setBias(const std::vector& inBias); /** Returns the size of the convolution kernel (time axis). */ - int getKernelSizeTime() const noexcept { return kernel_size_time; } + RTNEURAL_REALTIME int getKernelSizeTime() const noexcept { return kernel_size_time; } /** Returns the size of the convolution kernel (feature axis). */ - int getKernelSizeFeature() const noexcept { return kernel_size_feature; } + RTNEURAL_REALTIME int getKernelSizeFeature() const noexcept { return kernel_size_feature; } /** Returns the convolution stride (feature axis) */ - int getStride() const noexcept { return stride; } + RTNEURAL_REALTIME int getStride() const noexcept { return stride; } /** Returns the convolution dilation rate (time axis) */ - int getDilationRate() const noexcept { return dilation_rate; } + RTNEURAL_REALTIME int getDilationRate() const noexcept { return dilation_rate; } const int num_filters_in; const int num_features_in; @@ -168,7 +169,7 @@ class Conv2DT constexpr bool isActivation() const noexcept { return false; } /** Reset the layer's state */ - void reset() + RTNEURAL_REALTIME void reset() { state_index = 0; @@ -179,7 +180,7 @@ class Conv2DT }; /** Performs forward propagation for this layer. 
*/ - inline void forward(const input_type_flat& inMatrix) noexcept + RTNEURAL_REALTIME inline void forward(const input_type_flat& inMatrix) noexcept { const auto inMatrixReshaped = Eigen::Map(inMatrix.data()); auto outMatrix = Eigen::Map(outs.data()); @@ -204,26 +205,26 @@ class Conv2DT * * The weights vector must have size weights [kernel_size_time][num_filters_out][num_filters_in][kernel_size_feature] */ - void setWeights(const std::vector>>>& inWeights); + RTNEURAL_REALTIME void setWeights(const std::vector>>>& inWeights); /** * Sets the layer biases. * * The bias vector must have size bias[num_filters_out] */ - void setBias(const std::vector& inBias); + RTNEURAL_REALTIME void setBias(const std::vector& inBias); /** Returns the size of the convolution kernel (time axis). */ - int getKernelSizeTime() const noexcept { return kernel_size_time_t; } + RTNEURAL_REALTIME int getKernelSizeTime() const noexcept { return kernel_size_time_t; } /** Returns the size of the convolution kernel (feature axis). */ - int getKernelSizeFeature() const noexcept { return kernel_size_feature_t; } + RTNEURAL_REALTIME int getKernelSizeFeature() const noexcept { return kernel_size_feature_t; } /** Returns the convolution stride */ - int getStride() const noexcept { return stride_t; } + RTNEURAL_REALTIME int getStride() const noexcept { return stride_t; } /** Returns the convolution dilation rate */ - int getDilationRate() const noexcept { return dilation_rate_t; } + RTNEURAL_REALTIME int getDilationRate() const noexcept { return dilation_rate_t; } Eigen::Map outs; diff --git a/RTNeural/conv2d/conv2d_xsimd.h b/RTNeural/conv2d/conv2d_xsimd.h index b27c1b4..5c783ff 100644 --- a/RTNeural/conv2d/conv2d_xsimd.h +++ b/RTNeural/conv2d/conv2d_xsimd.h @@ -2,6 +2,7 @@ #define RTNEURAL_CONV2D_XSIMD_H #include "../Layer.h" +#include "../config.h" #include "../conv1d_stateless/conv1d_stateless.h" #include @@ -33,7 +34,7 @@ class Conv2D : public Layer virtual ~Conv2D() = default; /** Reset the layer's state */ - void reset() override + RTNEURAL_REALTIME void reset() override { state_index = 0; @@ -50,7 +51,7 @@ class Conv2D : public Layer constexpr bool isActivation() const noexcept { return false; } /** Performs forward propagation for this layer. */ - inline void forward(const T* input, T* output) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* output) noexcept override { for(int i = 0; i < kernel_size_time; ++i) { @@ -76,26 +77,26 @@ class Conv2D : public Layer * * The weights vector must have size weights[num_filters_out][num_filters_in][kernel_size] */ - void setWeights(const std::vector>>>& inWeights); + RTNEURAL_REALTIME void setWeights(const std::vector>>>& inWeights); /** * Sets the layer biases. * * The bias vector must have size bias[num_filters_out] */ - void setBias(const std::vector& inBias); + RTNEURAL_REALTIME void setBias(const std::vector& inBias); /** Returns the size of the convolution kernel (time axis). */ - int getKernelSizeTime() const noexcept { return kernel_size_time; } + RTNEURAL_REALTIME int getKernelSizeTime() const noexcept { return kernel_size_time; } /** Returns the size of the convolution kernel (feature axis). 
*/ - int getKernelSizeFeature() const noexcept { return kernel_size_feature; } + RTNEURAL_REALTIME int getKernelSizeFeature() const noexcept { return kernel_size_feature; } /** Returns the convolution stride (feature axis) */ - int getStride() const noexcept { return stride; } + RTNEURAL_REALTIME int getStride() const noexcept { return stride; } /** Returns the convolution dilation rate (time axis) */ - int getDilationRate() const noexcept { return dilation_rate; } + RTNEURAL_REALTIME int getDilationRate() const noexcept { return dilation_rate; } const int num_filters_in; const int num_features_in; @@ -170,7 +171,7 @@ class Conv2DT constexpr bool isActivation() const noexcept { return false; } /** Reset the layer's state */ - void reset() + RTNEURAL_REALTIME void reset() { state_index = 0; @@ -181,7 +182,7 @@ class Conv2DT } /** Performs forward propagation for this layer. */ - inline void forward(const v_type (&ins)[v_in_size]) noexcept + RTNEURAL_REALTIME inline void forward(const v_type (&ins)[v_in_size]) noexcept { for(int i = 0; i < kernel_size_time; ++i) { @@ -211,26 +212,26 @@ class Conv2DT * * The weights vector must have size weights [kernel_size_time][num_filters_out][num_filters_in][kernel_size_feature] */ - void setWeights(const std::vector>>>& inWeights); + RTNEURAL_REALTIME void setWeights(const std::vector>>>& inWeights); /** * Sets the layer biases. * * The bias vector must have size bias[num_filters_out] */ - void setBias(const std::vector& inBias); + RTNEURAL_REALTIME void setBias(const std::vector& inBias); /** Returns the size of the convolution kernel (time axis). */ - int getKernelSizeTime() const noexcept { return kernel_size_time_t; } + RTNEURAL_REALTIME int getKernelSizeTime() const noexcept { return kernel_size_time_t; } /** Returns the size of the convolution kernel (feature axis). 
*/ - int getKernelSizeFeature() const noexcept { return kernel_size_feature_t; } + RTNEURAL_REALTIME int getKernelSizeFeature() const noexcept { return kernel_size_feature_t; } /** Returns the convolution stride */ - int getStride() const noexcept { return stride_t; } + RTNEURAL_REALTIME int getStride() const noexcept { return stride_t; } /** Returns the convolution dilation rate */ - int getDilationRate() const noexcept { return dilation_rate_t; } + RTNEURAL_REALTIME int getDilationRate() const noexcept { return dilation_rate_t; } v_type outs[v_out_size]; diff --git a/RTNeural/dense/dense.h b/RTNeural/dense/dense.h index 76395f4..9ab5973 100644 --- a/RTNeural/dense/dense.h +++ b/RTNeural/dense/dense.h @@ -11,6 +11,7 @@ #include "dense_xsimd.h" #else #include "../Layer.h" +#include "../config.h" namespace RTNEURAL_NAMESPACE { @@ -29,22 +30,22 @@ class Dense1 ~Dense1() { delete[] weights; } - inline T forward(const T* input) noexcept + RTNEURAL_REALTIME inline T forward(const T* input) noexcept { return std::inner_product(weights, weights + in_size, input, (T)0) + bias; } - void setWeights(const T* newWeights) + RTNEURAL_REALTIME void setWeights(const T* newWeights) { for(int i = 0; i < in_size; ++i) weights[i] = newWeights[i]; } - void setBias(T b) { bias = b; } + RTNEURAL_REALTIME void setBias(T b) { bias = b; } - T getWeight(int i) const noexcept { return weights[i]; } + RTNEURAL_REALTIME T getWeight(int i) const noexcept { return weights[i]; } - T getBias() const noexcept { return bias; } + RTNEURAL_REALTIME T getBias() const noexcept { return bias; } private: const int in_size; @@ -98,7 +99,7 @@ class Dense final : public Layer std::string getName() const noexcept override { return "dense"; } /** Performs forward propagation for this layer. */ - inline void forward(const T* input, T* out) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* out) noexcept override { for(int i = 0; i < Layer::out_size; ++i) out[i] = subLayers[i]->forward(input); @@ -110,7 +111,7 @@ class Dense final : public Layer * The dimension of the weights vector must be * weights[out_size][in_size] */ - void setWeights(const std::vector>& newWeights) + RTNEURAL_REALTIME void setWeights(const std::vector>& newWeights) { for(int i = 0; i < Layer::out_size; ++i) subLayers[i]->setWeights(newWeights[i].data()); @@ -122,7 +123,7 @@ class Dense final : public Layer * The dimension of the weights array must be * weights[out_size][in_size] */ - void setWeights(T** newWeights) + RTNEURAL_REALTIME void setWeights(T** newWeights) { for(int i = 0; i < Layer::out_size; ++i) subLayers[i]->setWeights(newWeights[i]); @@ -132,20 +133,20 @@ class Dense final : public Layer * Sets the layer bias from a given array of size * bias[out_size] */ - void setBias(const T* b) + RTNEURAL_REALTIME void setBias(const T* b) { for(int i = 0; i < Layer::out_size; ++i) subLayers[i]->setBias(b[i]); } /** Returns the weights value at the given indices. */ - T getWeight(int i, int k) const noexcept + RTNEURAL_REALTIME T getWeight(int i, int k) const noexcept { return subLayers[i]->getWeight(k); } /** Returns the bias value at the given index. */ - T getBias(int i) const noexcept { return subLayers[i]->getBias(); } + RTNEURAL_REALTIME T getBias(int i) const noexcept { return subLayers[i]->getBias(); } private: Dense1** subLayers; @@ -184,10 +185,10 @@ class DenseT constexpr bool isActivation() const noexcept { return false; } /** Reset is a no-op, since Dense does not have state. 
*/ - void reset() { } + RTNEURAL_REALTIME void reset() { } /** Performs forward propagation for this layer. */ - inline void forward(const T (&ins)[in_size]) noexcept + RTNEURAL_REALTIME inline void forward(const T (&ins)[in_size]) noexcept { for(int i = 0; i < out_size; ++i) outs[i] = std::inner_product(ins, ins + in_size, &weights[i * in_size], (T)0) + bias[i]; @@ -199,7 +200,7 @@ class DenseT * The dimension of the weights vector must be * weights[out_size][in_size] */ - void setWeights(const std::vector>& newWeights) + RTNEURAL_REALTIME void setWeights(const std::vector>& newWeights) { for(int i = 0; i < out_size; ++i) { @@ -217,7 +218,7 @@ class DenseT * The dimension of the weights array must be * weights[out_size][in_size] */ - void setWeights(T** newWeights) + RTNEURAL_REALTIME void setWeights(T** newWeights) { for(int i = 0; i < out_size; ++i) { @@ -233,7 +234,7 @@ class DenseT * Sets the layer bias from a given array of size * bias[out_size] */ - void setBias(const T* b) + RTNEURAL_REALTIME void setBias(const T* b) { for(int i = 0; i < out_size; ++i) bias[i] = b[i]; diff --git a/RTNeural/dense/dense_eigen.h b/RTNeural/dense/dense_eigen.h index d858c0e..18b540e 100644 --- a/RTNeural/dense/dense_eigen.h +++ b/RTNeural/dense/dense_eigen.h @@ -2,6 +2,7 @@ #define DENSEEIGEN_H_INCLUDED #include "../Layer.h" +#include "../config.h" #include namespace RTNEURAL_NAMESPACE @@ -48,7 +49,7 @@ class Dense : public Layer std::string getName() const noexcept override { return "dense"; } /** Performs forward propagation for this layer. */ - inline void forward(const T* input, T* out) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* out) noexcept override { for(int i = 0; i < Layer::in_size; ++i) inVec(i, 0) = input[i]; @@ -69,7 +70,7 @@ class Dense : public Layer * The dimension of the weights vector must be * weights[out_size][in_size] */ - void setWeights(const std::vector>& newWeights) + RTNEURAL_REALTIME void setWeights(const std::vector>& newWeights) { for(int i = 0; i < Layer::out_size; ++i) for(int k = 0; k < Layer::in_size; ++k) @@ -82,7 +83,7 @@ class Dense : public Layer * The dimension of the weights array must be * weights[out_size][in_size] */ - void setWeights(T** newWeights) + RTNEURAL_REALTIME void setWeights(T** newWeights) { for(int i = 0; i < Layer::out_size; ++i) for(int k = 0; k < Layer::in_size; ++k) @@ -93,17 +94,17 @@ class Dense : public Layer * Sets the layer bias from a given array of size * bias[out_size] */ - void setBias(const T* b) + RTNEURAL_REALTIME void setBias(const T* b) { for(int i = 0; i < Layer::out_size; ++i) weights(i, Layer::in_size) = b[i]; } /** Returns the weights value at the given indices. */ - T getWeight(int i, int k) const noexcept { return weights(i, k); } + RTNEURAL_REALTIME T getWeight(int i, int k) const noexcept { return weights(i, k); } /** Returns the bias value at the given index. */ - T getBias(int i) const noexcept { return weights(i, Layer::in_size); } + RTNEURAL_REALTIME T getBias(int i) const noexcept { return weights(i, Layer::in_size); } private: Eigen::Matrix weights; @@ -144,10 +145,10 @@ class DenseT constexpr bool isActivation() const noexcept { return false; } /** Reset is a no-op, since Dense does not have state. */ - void reset() { } + RTNEURAL_REALTIME void reset() { } /** Performs forward propagation for this layer. 
*/ - inline void forward(const Eigen::Matrix& ins) noexcept + RTNEURAL_REALTIME inline void forward(const Eigen::Matrix& ins) noexcept { for(int i = 0; i < in_size; ++i) ins_internal(i, 0) = ins(i, 0); @@ -165,7 +166,7 @@ class DenseT * The dimension of the weights vector must be * weights[out_size][in_size] */ - void setWeights(const std::vector>& newWeights) + RTNEURAL_REALTIME void setWeights(const std::vector>& newWeights) { for(int i = 0; i < out_size; ++i) for(int k = 0; k < in_size; ++k) @@ -178,7 +179,7 @@ class DenseT * The dimension of the weights array must be * weights[out_size][in_size] */ - void setWeights(T** newWeights) + RTNEURAL_REALTIME void setWeights(T** newWeights) { for(int i = 0; i < out_size; ++i) for(int k = 0; k < in_size; ++k) @@ -189,7 +190,7 @@ class DenseT * Sets the layer bias from a given array of size * bias[out_size] */ - void setBias(const T* b) + RTNEURAL_REALTIME void setBias(const T* b) { for(int i = 0; i < out_size; ++i) weights(i, in_size) = b[i]; diff --git a/RTNeural/dense/dense_xsimd.h b/RTNeural/dense/dense_xsimd.h index 15d797e..9946ecc 100644 --- a/RTNeural/dense/dense_xsimd.h +++ b/RTNeural/dense/dense_xsimd.h @@ -2,6 +2,7 @@ #define DENSEXSIMD_H_INCLUDED #include "../Layer.h" +#include "../config.h" #include namespace RTNEURAL_NAMESPACE @@ -47,7 +48,7 @@ class Dense : public Layer std::string getName() const noexcept override { return "dense"; } /** Performs forward propagation for this layer. */ - inline void forward(const T* input, T* out) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* out) noexcept override { for(int l = 0; l < Layer::out_size; ++l) { @@ -66,7 +67,7 @@ class Dense : public Layer * The dimension of the weights vector must be * weights[out_size][in_size] */ - void setWeights(const std::vector>& newWeights) + RTNEURAL_REALTIME void setWeights(const std::vector>& newWeights) { for(int i = 0; i < Layer::out_size; ++i) for(int k = 0; k < Layer::in_size; ++k) @@ -79,7 +80,7 @@ class Dense : public Layer * The dimension of the weights array must be * weights[out_size][in_size] */ - void setWeights(T** newWeights) + RTNEURAL_REALTIME void setWeights(T** newWeights) { for(int i = 0; i < Layer::out_size; ++i) for(int k = 0; k < Layer::in_size; ++k) @@ -90,17 +91,17 @@ class Dense : public Layer * Sets the layer bias from a given array of size * bias[out_size] */ - void setBias(const T* b) + RTNEURAL_REALTIME void setBias(const T* b) { for(int i = 0; i < Layer::out_size; ++i) bias[i] = b[i]; } /** Returns the weights value at the given indices. */ - T getWeight(int i, int k) const noexcept { return weights[i][k]; } + RTNEURAL_REALTIME T getWeight(int i, int k) const noexcept { return weights[i][k]; } /** Returns the bias value at the given index. */ - T getBias(int i) const noexcept { return bias[i]; } + RTNEURAL_REALTIME T getBias(int i) const noexcept { return bias[i]; } private: using vec_type = std::vector>; @@ -149,10 +150,10 @@ class DenseT constexpr bool isActivation() const noexcept { return false; } /** Reset is a no-op, since Dense does not have state. */ - void reset() { } + RTNEURAL_REALTIME void reset() { } /** Performs forward propagation for this layer. 
*/ - inline void forward(const v_type (&ins)[v_in_size]) noexcept + RTNEURAL_REALTIME inline void forward(const v_type (&ins)[v_in_size]) noexcept { static constexpr auto v_size_inner = std::min(v_size, in_size); @@ -177,7 +178,7 @@ class DenseT * The dimension of the weights vector must be * weights[out_size][in_size] */ - void setWeights(const std::vector>& newWeights) + RTNEURAL_REALTIME void setWeights(const std::vector>& newWeights) { for(int i = 0; i < out_size; ++i) { @@ -194,7 +195,7 @@ class DenseT * The dimension of the weights array must be * weights[out_size][in_size] */ - void setWeights(T** newWeights) + RTNEURAL_REALTIME void setWeights(T** newWeights) { for(int i = 0; i < out_size; ++i) { @@ -209,7 +210,7 @@ class DenseT * Sets the layer bias from a given array of size * bias[out_size] */ - void setBias(const T* b) + RTNEURAL_REALTIME void setBias(const T* b) { for(int i = 0; i < out_size; ++i) bias[i / v_size] = set_value(bias[i / v_size], i % v_size, b[i]); @@ -248,9 +249,9 @@ class DenseT std::string getName() const noexcept { return "dense"; } constexpr bool isActivation() const noexcept { return false; } - void reset() { } + RTNEURAL_REALTIME void reset() { } - inline void forward(const v_type (&ins)[v_in_size]) noexcept + RTNEURAL_REALTIME inline void forward(const v_type (&ins)[v_in_size]) noexcept { v_type y {}; for(int k = 0; k < v_in_size; ++k) @@ -259,7 +260,7 @@ class DenseT outs[0] = v_type(xsimd::reduce_add(y) + bias); } - void setWeights(const std::vector>& newWeights) + RTNEURAL_REALTIME void setWeights(const std::vector>& newWeights) { for(int i = 0; i < out_size; ++i) { @@ -271,7 +272,7 @@ class DenseT } } - void setWeights(T** newWeights) + RTNEURAL_REALTIME void setWeights(T** newWeights) { for(int i = 0; i < out_size; ++i) { @@ -283,7 +284,7 @@ class DenseT } } - void setBias(const T* b) + RTNEURAL_REALTIME void setBias(const T* b) { bias = b[0]; } @@ -329,10 +330,10 @@ class DenseT constexpr bool isActivation() const noexcept { return false; } /** Reset is a no-op, since Dense does not have state. */ - void reset() { } + RTNEURAL_REALTIME void reset() { } /** Performs forward propagation for this layer. 
*/ - inline void forward(const v_type (&ins)[1]) noexcept + RTNEURAL_REALTIME inline void forward(const v_type (&ins)[1]) noexcept { for(int i = 0; i < v_out_size; ++i) outs[i] = bias[i]; @@ -347,7 +348,7 @@ class DenseT * The dimension of the weights vector must be * weights[out_size][in_size] */ - void setWeights(const std::vector>& newWeights) + RTNEURAL_REALTIME void setWeights(const std::vector>& newWeights) { for(int i = 0; i < out_size; ++i) weights[i / v_size] = set_value(weights[i / v_size], i % v_size, newWeights[i][0]); @@ -359,7 +360,7 @@ class DenseT * The dimension of the weights array must be * weights[out_size][in_size] */ - void setWeights(T** newWeights) + RTNEURAL_REALTIME void setWeights(T** newWeights) { for(int i = 0; i < out_size; ++i) weights[i / v_size] = set_value(weights[i / v_size], i % v_size, newWeights[i][0]); @@ -369,7 +370,7 @@ class DenseT * Sets the layer bias from a given array of size * bias[out_size] */ - void setBias(const T* b) + RTNEURAL_REALTIME void setBias(const T* b) { for(int i = 0; i < out_size; ++i) bias[i / v_size] = set_value(bias[i / v_size], i % v_size, b[i]); diff --git a/RTNeural/gru/gru.h b/RTNeural/gru/gru.h index 6d45242..cf840ec 100644 --- a/RTNeural/gru/gru.h +++ b/RTNeural/gru/gru.h @@ -12,6 +12,7 @@ #else #include "../Layer.h" #include "../common.h" +#include "../config.h" #include "../maths/maths_stl.h" #include @@ -38,13 +39,13 @@ class GRULayer final : public Layer virtual ~GRULayer(); /** Resets the state of the GRU. */ - void reset() override { std::fill(ht1, ht1 + Layer::out_size, (T)0); } + RTNEURAL_REALTIME void reset() override { std::fill(ht1, ht1 + Layer::out_size, (T)0); } /** Returns the name of this layer. */ std::string getName() const noexcept override { return "gru"; } /** Performs forward propagation for this layer. */ - inline void forward(const T* input, T* h) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* h) noexcept override { for(int i = 0; i < Layer::out_size; ++i) { @@ -62,51 +63,51 @@ class GRULayer final : public Layer * * The weights vector must have size weights[in_size][3 * out_size] */ - void setWVals(T** wVals); + RTNEURAL_REALTIME void setWVals(T** wVals); /** * Sets the layer recurrent weights. * * The weights vector must have size weights[out_size][3 * out_size] */ - void setUVals(T** uVals); + RTNEURAL_REALTIME void setUVals(T** uVals); /** * Sets the layer bias. * * The bias vector must have size weights[2][3 * out_size] */ - void setBVals(T** bVals); + RTNEURAL_REALTIME void setBVals(T** bVals); /** * Sets the layer kernel weights. * * The weights vector must have size weights[in_size][3 * out_size] */ - void setWVals(const std::vector>& wVals); + RTNEURAL_REALTIME void setWVals(const std::vector>& wVals); /** * Sets the layer recurrent weights. * * The weights vector must have size weights[out_size][3 * out_size] */ - void setUVals(const std::vector>& uVals); + RTNEURAL_REALTIME void setUVals(const std::vector>& uVals); /** * Sets the layer bias. * * The bias vector must have size weights[2][3 * out_size] */ - void setBVals(const std::vector>& bVals); + RTNEURAL_REALTIME void setBVals(const std::vector>& bVals); /** Returns the kernel weight for the given indices. */ - T getWVal(int i, int k) const noexcept; + RTNEURAL_REALTIME T getWVal(int i, int k) const noexcept; /** Returns the recurrent weight for the given indices. 
*/ - T getUVal(int i, int k) const noexcept; + RTNEURAL_REALTIME T getUVal(int i, int k) const noexcept; /** Returns the bias value for the given indices. */ - T getBVal(int i, int k) const noexcept; + RTNEURAL_REALTIME T getBVal(int i, int k) const noexcept; protected: T* ht1; @@ -171,11 +172,11 @@ class GRULayerT prepare(T delaySamples); /** Resets the state of the GRU. */ - void reset(); + RTNEURAL_REALTIME void reset(); /** Performs forward propagation for this layer. */ template - inline typename std::enable_if<(N > 1), void>::type + RTNEURAL_REALTIME inline typename std::enable_if<(N > 1), void>::type forward(const T (&ins)[in_size]) noexcept { // compute zt @@ -201,7 +202,7 @@ class GRULayerT /** Performs forward propagation for this layer. */ template - inline typename std::enable_if::type + RTNEURAL_REALTIME inline typename std::enable_if::type forward(const T (&ins)[in_size]) noexcept { // compute zt @@ -227,21 +228,21 @@ class GRULayerT * * The weights vector must have size weights[in_size][3 * out_size] */ - void setWVals(const std::vector>& wVals); + RTNEURAL_REALTIME void setWVals(const std::vector>& wVals); /** * Sets the layer recurrent weights. * * The weights vector must have size weights[out_size][3 * out_size] */ - void setUVals(const std::vector>& uVals); + RTNEURAL_REALTIME void setUVals(const std::vector>& uVals); /** * Sets the layer bias. * * The bias vector must have size weights[2][3 * out_size] */ - void setBVals(const std::vector>& bVals); + RTNEURAL_REALTIME void setBVals(const std::vector>& bVals); T outs alignas(RTNEURAL_DEFAULT_ALIGNMENT)[out_size]; diff --git a/RTNeural/gru/gru_eigen.h b/RTNeural/gru/gru_eigen.h index 54e468f..1bde669 100644 --- a/RTNeural/gru/gru_eigen.h +++ b/RTNeural/gru/gru_eigen.h @@ -3,6 +3,7 @@ #include "../Layer.h" #include "../common.h" +#include "../config.h" #include "../maths/maths_eigen.h" namespace RTNEURAL_NAMESPACE @@ -28,7 +29,7 @@ class GRULayer : public Layer virtual ~GRULayer() = default; /** Resets the state of the GRU. */ - void reset() override + RTNEURAL_REALTIME void reset() override { extendedHt1.setZero(); extendedHt1(Layer::out_size) = (T)1; @@ -38,7 +39,7 @@ class GRULayer : public Layer std::string getName() const noexcept override { return "gru"; } /** Performs forward propagation for this layer. */ - inline void forward(const T* input, T* h) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* h) noexcept override { for(int i = 0; i < Layer::in_size; ++i) { @@ -89,34 +90,34 @@ class GRULayer : public Layer * * The weights vector must have size weights[in_size][3 * out_size] */ - void setWVals(T** wVals); + RTNEURAL_REALTIME void setWVals(T** wVals); /** * Sets the layer recurrent weights. * * The weights vector must have size weights[out_size][3 * out_size] */ - void setUVals(T** uVals); + RTNEURAL_REALTIME void setUVals(T** uVals); /** * Sets the layer bias. * * The bias vector must have size weights[2][3 * out_size] */ - void setBVals(T** bVals); + RTNEURAL_REALTIME void setBVals(T** bVals); /** Returns the kernel weight for the given indices. */ - void setWVals(const std::vector>& wVals); + RTNEURAL_REALTIME void setWVals(const std::vector>& wVals); /** Returns the recurrent weight for the given indices. */ - void setUVals(const std::vector>& uVals); + RTNEURAL_REALTIME void setUVals(const std::vector>& uVals); /** Returns the bias value for the given indices. 
*/ - void setBVals(const std::vector>& bVals); + RTNEURAL_REALTIME void setBVals(const std::vector>& bVals); - T getWVal(int i, int k) const noexcept; - T getUVal(int i, int k) const noexcept; - T getBVal(int i, int k) const noexcept; + RTNEURAL_REALTIME T getWVal(int i, int k) const noexcept; + RTNEURAL_REALTIME T getUVal(int i, int k) const noexcept; + RTNEURAL_REALTIME T getBVal(int i, int k) const noexcept; private: // Kernels @@ -191,10 +192,10 @@ class GRULayerT prepare(T delaySamples); /** Resets the state of the GRU. */ - void reset(); + RTNEURAL_REALTIME void reset(); /** Performs forward propagation for this layer. */ - inline void forward(const in_type& ins) noexcept + RTNEURAL_REALTIME inline void forward(const in_type& ins) noexcept { for(int i = 0; i < in_sizet; ++i) { @@ -241,21 +242,21 @@ class GRULayerT * * The weights vector must have size weights[in_size][3 * out_size] */ - void setWVals(const std::vector>& wVals); + RTNEURAL_REALTIME void setWVals(const std::vector>& wVals); /** * Sets the layer recurrent weights. * * The weights vector must have size weights[out_size][3 * out_size] */ - void setUVals(const std::vector>& uVals); + RTNEURAL_REALTIME void setUVals(const std::vector>& uVals); /** * Sets the layer bias. * * The bias vector must have size weights[2][3 * out_size] */ - void setBVals(const std::vector>& bVals); + RTNEURAL_REALTIME void setBVals(const std::vector>& bVals); Eigen::Map outs; diff --git a/RTNeural/gru/gru_xsimd.h b/RTNeural/gru/gru_xsimd.h index 665ff10..507312c 100644 --- a/RTNeural/gru/gru_xsimd.h +++ b/RTNeural/gru/gru_xsimd.h @@ -3,6 +3,7 @@ #include "../Layer.h" #include "../common.h" +#include "../config.h" #include "../maths/maths_xsimd.h" #include namespace RTNEURAL_NAMESPACE @@ -28,13 +29,13 @@ class GRULayer : public Layer virtual ~GRULayer(); /** Resets the state of the GRU. */ - void reset() override { std::fill(ht1.begin(), ht1.end(), (T)0); } + RTNEURAL_REALTIME void reset() override { std::fill(ht1.begin(), ht1.end(), (T)0); } /** Returns the name of this layer. */ std::string getName() const noexcept override { return "gru"; } /** Performs forward propagation for this layer. */ - inline void forward(const T* input, T* h) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* h) noexcept override { for(int i = 0; i < Layer::out_size; ++i) { @@ -71,51 +72,51 @@ class GRULayer : public Layer * * The weights vector must have size weights[in_size][3 * out_size] */ - void setWVals(T** wVals); + RTNEURAL_REALTIME void setWVals(T** wVals); /** * Sets the layer recurrent weights. * * The weights vector must have size weights[out_size][3 * out_size] */ - void setUVals(T** uVals); + RTNEURAL_REALTIME void setUVals(T** uVals); /** * Sets the layer bias. * * The bias vector must have size weights[2][3 * out_size] */ - void setBVals(T** bVals); + RTNEURAL_REALTIME void setBVals(T** bVals); /** * Sets the layer kernel weights. * * The weights vector must have size weights[in_size][3 * out_size] */ - void setWVals(const std::vector>& wVals); + RTNEURAL_REALTIME void setWVals(const std::vector>& wVals); /** * Sets the layer recurrent weights. * * The weights vector must have size weights[out_size][3 * out_size] */ - void setUVals(const std::vector>& uVals); + RTNEURAL_REALTIME void setUVals(const std::vector>& uVals); /** * Sets the layer bias. 
* * The bias vector must have size weights[2][3 * out_size] */ - void setBVals(const std::vector>& bVals); + RTNEURAL_REALTIME void setBVals(const std::vector>& bVals); /** Returns the kernel weight for the given indices. */ - T getWVal(int i, int k) const noexcept; + RTNEURAL_REALTIME T getWVal(int i, int k) const noexcept; /** Returns the recurrent weight for the given indices. */ - T getUVal(int i, int k) const noexcept; + RTNEURAL_REALTIME T getUVal(int i, int k) const noexcept; /** Returns the bias value for the given indices. */ - T getBVal(int i, int k) const noexcept; + RTNEURAL_REALTIME T getBVal(int i, int k) const noexcept; protected: using vec_type = std::vector>; @@ -190,11 +191,11 @@ class GRULayerT prepare(T delaySamples); /** Resets the state of the GRU. */ - void reset(); + RTNEURAL_REALTIME void reset(); /** Performs forward propagation for this layer. */ template - inline typename std::enable_if<(N > 1), void>::type + RTNEURAL_REALTIME inline typename std::enable_if<(N > 1), void>::type forward(const v_type (&ins)[v_in_size]) noexcept { // compute zt @@ -220,7 +221,7 @@ class GRULayerT /** Performs forward propagation for this layer. */ template - inline typename std::enable_if::type + RTNEURAL_REALTIME inline typename std::enable_if::type forward(const v_type (&ins)[v_in_size]) noexcept { // compute zt @@ -246,21 +247,21 @@ class GRULayerT * * The weights vector must have size weights[in_size][3 * out_size] */ - void setWVals(const std::vector>& wVals); + RTNEURAL_REALTIME void setWVals(const std::vector>& wVals); /** * Sets the layer recurrent weights. * * The weights vector must have size weights[out_size][3 * out_size] */ - void setUVals(const std::vector>& uVals); + RTNEURAL_REALTIME void setUVals(const std::vector>& uVals); /** * Sets the layer bias. * * The bias vector must have size weights[2][3 * out_size] */ - void setBVals(const std::vector>& bVals); + RTNEURAL_REALTIME void setBVals(const std::vector>& bVals); v_type outs[v_out_size]; diff --git a/RTNeural/lstm/lstm.h b/RTNeural/lstm/lstm.h index c4a85d8..80e495a 100644 --- a/RTNeural/lstm/lstm.h +++ b/RTNeural/lstm/lstm.h @@ -10,6 +10,7 @@ #else #include "../Layer.h" #include "../common.h" +#include "../config.h" #include "../maths/maths_stl.h" #include @@ -36,13 +37,13 @@ class LSTMLayer final : public Layer virtual ~LSTMLayer(); /** Resets the state of the LSTM. */ - void reset() override; + RTNEURAL_REALTIME void reset() override; /** Returns the name of this layer. */ std::string getName() const noexcept override { return "lstm"; } /** Performs forward propagation for this layer. */ - inline void forward(const T* input, T* h) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* h) noexcept override { for(int i = 0; i < Layer::out_size; ++i) { @@ -63,21 +64,21 @@ class LSTMLayer final : public Layer * * The weights vector must have size weights[in_size][4 * out_size] */ - void setWVals(const std::vector>& wVals); + RTNEURAL_REALTIME void setWVals(const std::vector>& wVals); /** * Sets the layer recurrent weights. * * The weights vector must have size weights[out_size][4 * out_size] */ - void setUVals(const std::vector>& uVals); + RTNEURAL_REALTIME void setUVals(const std::vector>& uVals); /** * Sets the layer bias. 
* * The bias vector must have size weights[4 * out_size] */ - void setBVals(const std::vector& bVals); + RTNEURAL_REALTIME void setBVals(const std::vector& bVals); protected: T* ht1; @@ -144,11 +145,11 @@ class LSTMLayerT prepare(T delaySamples); /** Resets the state of the LSTM. */ - void reset(); + RTNEURAL_REALTIME void reset(); /** Performs forward propagation for this layer. */ template - inline typename std::enable_if<(N > 1), void>::type + RTNEURAL_REALTIME inline typename std::enable_if<(N > 1), void>::type forward(const T (&ins)[in_size]) noexcept { // compute ft @@ -174,7 +175,7 @@ class LSTMLayerT /** Performs forward propagation for this layer. */ template - inline typename std::enable_if::type + RTNEURAL_REALTIME inline typename std::enable_if::type forward(const T (&ins)[in_size]) noexcept { // compute ft @@ -200,21 +201,21 @@ class LSTMLayerT * * The weights vector must have size weights[in_size][4 * out_size] */ - void setWVals(const std::vector>& wVals); + RTNEURAL_REALTIME void setWVals(const std::vector>& wVals); /** * Sets the layer recurrent weights. * * The weights vector must have size weights[out_size][4 * out_size] */ - void setUVals(const std::vector>& uVals); + RTNEURAL_REALTIME void setUVals(const std::vector>& uVals); /** * Sets the layer bias. * * The bias vector must have size weights[4 * out_size] */ - void setBVals(const std::vector& bVals); + RTNEURAL_REALTIME void setBVals(const std::vector& bVals); T outs alignas(RTNEURAL_DEFAULT_ALIGNMENT)[out_size]; diff --git a/RTNeural/lstm/lstm_eigen.h b/RTNeural/lstm/lstm_eigen.h index 3d614d0..0beffef 100644 --- a/RTNeural/lstm/lstm_eigen.h +++ b/RTNeural/lstm/lstm_eigen.h @@ -3,6 +3,7 @@ #include "../Layer.h" #include "../common.h" +#include "../config.h" #include "../maths/maths_eigen.h" namespace RTNEURAL_NAMESPACE @@ -31,10 +32,10 @@ class LSTMLayer : public Layer std::string getName() const noexcept override { return "lstm"; } /** Resets the state of the LSTM. */ - void reset() override; + RTNEURAL_REALTIME void reset() override; /** Performs forward propagation for this layer. */ - inline void forward(const T* input, T* h) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* h) noexcept override { for(int i = 0; i < Layer::in_size; ++i) { @@ -70,21 +71,21 @@ class LSTMLayer : public Layer * * The weights vector must have size weights[in_size][4 * out_size] */ - void setWVals(const std::vector>& wVals); + RTNEURAL_REALTIME void setWVals(const std::vector>& wVals); /** * Sets the layer recurrent weights. * * The weights vector must have size weights[out_size][4 * out_size] */ - void setUVals(const std::vector>& uVals); + RTNEURAL_REALTIME void setUVals(const std::vector>& uVals); /** * Sets the layer bias. * * The bias vector must have size weights[4 * out_size] */ - void setBVals(const std::vector& bVals); + RTNEURAL_REALTIME void setBVals(const std::vector& bVals); private: Eigen::Matrix combinedWeights; @@ -146,10 +147,10 @@ class LSTMLayerT prepare(T delaySamples); /** Resets the state of the LSTM. */ - void reset(); + RTNEURAL_REALTIME void reset(); /** Performs forward propagation for this layer. 
*/ - inline void forward(const in_type& ins) noexcept + RTNEURAL_REALTIME inline void forward(const in_type& ins) noexcept { for(int i = 0; i < in_sizet; ++i) { @@ -175,21 +176,21 @@ class LSTMLayerT * * The weights vector must have size weights[in_size][4 * out_size] */ - void setWVals(const std::vector>& wVals); + RTNEURAL_REALTIME void setWVals(const std::vector>& wVals); /** * Sets the layer recurrent weights. * * The weights vector must have size weights[out_size][4 * out_size] */ - void setUVals(const std::vector>& uVals); + RTNEURAL_REALTIME void setUVals(const std::vector>& uVals); /** * Sets the layer bias. * * The bias vector must have size weights[4 * out_size] */ - void setBVals(const std::vector& bVals); + RTNEURAL_REALTIME void setBVals(const std::vector& bVals); Eigen::Map outs; diff --git a/RTNeural/lstm/lstm_xsimd.h b/RTNeural/lstm/lstm_xsimd.h index 9ef8af0..26f3cc7 100644 --- a/RTNeural/lstm/lstm_xsimd.h +++ b/RTNeural/lstm/lstm_xsimd.h @@ -3,6 +3,7 @@ #include "../Layer.h" #include "../common.h" +#include "../config.h" #include "../maths/maths_xsimd.h" #include @@ -29,13 +30,13 @@ class LSTMLayer : public Layer virtual ~LSTMLayer(); /** Resets the state of the LSTM. */ - void reset() override; + RTNEURAL_REALTIME void reset() override; /** Returns the name of this layer. */ std::string getName() const noexcept override { return "lstm"; } /** Performs forward propagation for this layer. */ - inline void forward(const T* input, T* h) noexcept override + RTNEURAL_REALTIME inline void forward(const T* input, T* h) noexcept override { for(int i = 0; i < Layer::out_size; ++i) { @@ -73,21 +74,21 @@ class LSTMLayer : public Layer * * The weights vector must have size weights[in_size][4 * out_size] */ - void setWVals(const std::vector>& wVals); + RTNEURAL_REALTIME void setWVals(const std::vector>& wVals); /** * Sets the layer recurrent weights. * * The weights vector must have size weights[out_size][4 * out_size] */ - void setUVals(const std::vector>& uVals); + RTNEURAL_REALTIME void setUVals(const std::vector>& uVals); /** * Sets the layer bias. * * The bias vector must have size weights[4 * out_size] */ - void setBVals(const std::vector& bVals); + RTNEURAL_REALTIME void setBVals(const std::vector& bVals); protected: using vec_type = std::vector>; @@ -164,11 +165,11 @@ class LSTMLayerT prepare(T delaySamples); /** Resets the state of the LSTM. */ - void reset(); + RTNEURAL_REALTIME void reset(); /** Performs forward propagation for this layer. */ template - inline typename std::enable_if<(N > 1), void>::type + RTNEURAL_REALTIME inline typename std::enable_if<(N > 1), void>::type forward(const v_type (&ins)[v_in_size]) noexcept { // compute ft @@ -194,7 +195,7 @@ class LSTMLayerT /** Performs forward propagation for this layer. */ template - inline typename std::enable_if::type + RTNEURAL_REALTIME inline typename std::enable_if::type forward(const v_type (&ins)[v_in_size]) noexcept { // compute ft @@ -220,21 +221,21 @@ class LSTMLayerT * * The weights vector must have size weights[in_size][4 * out_size] */ - void setWVals(const std::vector>& wVals); + RTNEURAL_REALTIME void setWVals(const std::vector>& wVals); /** * Sets the layer recurrent weights. * * The weights vector must have size weights[out_size][4 * out_size] */ - void setUVals(const std::vector>& uVals); + RTNEURAL_REALTIME void setUVals(const std::vector>& uVals); /** * Sets the layer bias. 
     *
     * The bias vector must have size weights[4 * out_size]
     */
-    void setBVals(const std::vector<T>& bVals);
+    RTNEURAL_REALTIME void setBVals(const std::vector<T>& bVals);

     v_type outs[v_out_size];

diff --git a/cmake/Sanitizers.cmake b/cmake/Sanitizers.cmake
new file mode 100644
index 0000000..510094a
--- /dev/null
+++ b/cmake/Sanitizers.cmake
@@ -0,0 +1,7 @@
+option(RTNEURAL_ENABLE_RADSAN "Enable RealtimeSanitizer (RADSan) checks (requires RADSan clang)" OFF)
+
+function(rtneural_radsan_configure target)
+    target_compile_definitions(${target} PUBLIC RTNEURAL_RADSAN_ENABLED)
+    target_compile_options(${target} PUBLIC -fsanitize=realtime)
+    target_link_options(${target} PUBLIC -fsanitize=realtime)
+endfunction()
diff --git a/cmake/Testing.cmake b/cmake/Testing.cmake
index 6d6e44e..b92da17 100644
--- a/cmake/Testing.cmake
+++ b/cmake/Testing.cmake
@@ -4,7 +4,7 @@ macro(rtneural_setup_testing)
     include(CTest)
     enable_testing()

-    add_custom_target(rtneural_test COMMAND ctest -C ${Configuration} --output-on-failure)
+    add_custom_target(rtneural_test COMMAND ctest -C $<CONFIG> --output-on-failure)

     # From the GoogleTest README:
     # For Windows: Prevent overriding the parent project's compiler/linker settings
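
The methods tagged throughout this patch all go through the RTNEURAL_REALTIME macro from RTNeural/config.h, which the patch adds but which is not reproduced in this excerpt. A minimal sketch of how such a macro is typically wired up, assuming the RADSan fork's [[clang::realtime]] attribute (the actual config.h may differ):

    // Sketch only, not the patch's exact config.h. RTNEURAL_RADSAN_ENABLED is
    // defined by rtneural_radsan_configure() in cmake/Sanitizers.cmake above;
    // in ordinary builds the macro expands to nothing and has no overhead.
    #ifdef RTNEURAL_RADSAN_ENABLED
    #define RTNEURAL_REALTIME [[clang::realtime]]
    #else
    #define RTNEURAL_REALTIME
    #endif

    // Hypothetical example of what the sanitizer then catches: under
    // -fsanitize=realtime, a function carrying the attribute may not
    // allocate, lock, or otherwise block. This one aborts at run time
    // with a RADSan diagnostic, because std::vector allocates.
    #include <vector>

    float process(float x) RTNEURAL_REALTIME
    {
        std::vector<float> scratch(16); // heap allocation -> real-time violation
        return x * static_cast<float>(scratch.size());
    }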
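
Enabling the checks is a one-flag opt-in at configure time; as the option text above notes, a RADSan-enabled clang must be the active compiler. A typical configure-and-test sequence (build directory and option set illustrative):

    # assumes clang/clang++ from a RADSan build are the active compilers
    cmake -Bbuild -DRTNEURAL_ENABLE_RADSAN=ON
    cmake --build build
    cd build && ctest --output-on-failure

Because rtneural_radsan_configure() applies the compile definition and -fsanitize=realtime flags with PUBLIC visibility, any target that links against RTNeural inherits them as well, so user code calling the annotated forward() and reset() paths is checked under the same real-time constraints.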