Testing compatibility with half-precision floating point types (#159)
* Working float16 tests with gcc-13

* Try to enable half-precision float tests

* Apply clang-format

* Trying to fix gcc install

* Trying to fix gcc install

* Trying to pass gcc-13 to cmake config

* Verbose test output

* Let's try Eigen and XSIMD

* Skip testing XSIMD for now

---------

Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
jatinchowdhury18 and github-actions[bot] authored Dec 6, 2024
1 parent 21ebabd commit 29e41da
Showing 6 changed files with 81 additions and 10 deletions.
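For context on the tolerances used in the test changes below (1.0e-3 for std::float16_t, 1.0e-2 for std::bfloat16_t): they roughly track each type's machine epsilon, since float16 carries about three decimal digits of precision and bfloat16 about two to three. The standalone snippet that follows is not part of this commit; it is a minimal sketch, assuming a C++23 toolchain such as gcc-13 that ships <stdfloat>, and simply prints those epsilons.

#include <cstdio>
#include <limits>
#if __cplusplus > 202002L
#include <stdfloat>
#endif

int main()
{
#if __STDCPP_FLOAT16_T__
    // 2^-10, roughly 9.8e-4 -- hence the 1.0e-3 test threshold
    std::printf("float16 epsilon:  %g\n",
        static_cast<double>(std::numeric_limits<std::float16_t>::epsilon()));
#endif
#if __STDCPP_BFLOAT16_T__
    // 2^-7, roughly 7.8e-3 -- hence the 1.0e-2 test threshold
    std::printf("bfloat16 epsilon: %g\n",
        static_cast<double>(std::numeric_limits<std::bfloat16_t>::epsilon()));
#endif
    return 0;
}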
18 changes: 17 additions & 1 deletion .github/workflows/tests.yml
@@ -35,6 +35,15 @@ jobs:
- os: ubuntu-latest
name: "STL"
backend: "-DRTNEURAL_STL=ON"
- os: ubuntu-latest
name: "Eigen / C++23"
backend: "-DRTNEURAL_EIGEN=ON -DCMAKE_CXX_STANDARD=23 -DCMAKE_CXX_COMPILER=gcc-13 -DCMAKE_CXX_COMPILER=g++-13"
# - os: ubuntu-latest
# name: "XSIMD / C++23"
# backend: "-DRTNEURAL_XSIMD=ON -DCMAKE_CXX_STANDARD=23 -DCMAKE_CXX_COMPILER=gcc-13 -DCMAKE_CXX_COMPILER=g++-13"
- os: ubuntu-latest
name: "STL / C++23"
backend: "-DRTNEURAL_STL=ON -DCMAKE_CXX_STANDARD=23 -DCMAKE_CXX_COMPILER=gcc-13 -DCMAKE_CXX_COMPILER=g++-13"
- os: windows-latest
name: "Eigen"
backend: "-DRTNEURAL_EIGEN=ON"
@@ -67,6 +76,13 @@ jobs:
backend: "-DRTNEURAL_STL=ON"

steps:
- name: Install Linux Deps
if: runner.os == 'Linux'
run: |
sudo add-apt-repository ppa:ubuntu-toolchain-r/test -y
sudo apt update
sudo apt install gcc-13 g++-13 -y
- name: Get latest CMake
uses: lukka/get-cmake@latest

@@ -87,4 +103,4 @@ jobs:

- name: Test
shell: bash
-run: ctest -C Release --test-dir build --parallel --output-on-failure
+run: ctest -V -C Release --test-dir build --parallel --output-on-failure
2 changes: 1 addition & 1 deletion CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required(VERSION 3.5)
+cmake_minimum_required(VERSION 3.11)
project(RTNeural VERSION 1.0.0)
include(cmake/CXXStandard.cmake)
include(cmake/Sanitizers.cmake)
1 change: 1 addition & 0 deletions cmake/CXXStandard.cmake
@@ -2,6 +2,7 @@ if("${CMAKE_CXX_STANDARD}" STREQUAL "")
message(STATUS "A C++ Standard has not been set for this project! RTNeural is selecting C++14...")
set(CMAKE_CXX_STANDARD 14)
else()
message(STATUS "Compiling RTNeural with C++ version ${CMAKE_CXX_STANDARD}")
if(${CMAKE_CXX_STANDARD} LESS 14)
message(FATAL_ERROR "RTNeural requires C++ 14 or later")
endif()
26 changes: 22 additions & 4 deletions tests/functional/torch_conv1d_test.cpp
@@ -3,18 +3,22 @@
#include "load_csv.hpp"
#include <RTNeural/RTNeural.h>

#if __cplusplus > 202002L
#include <stdfloat>
#endif

namespace
{
template <typename T>
-void expectNear(T const& expected, T const& actual)
+void expectNear(T const& expected, T const& actual, double error_thresh)
{
EXPECT_THAT(
static_cast<double>(expected),
-testing::DoubleNear(static_cast<double>(actual), 1e-6));
+testing::DoubleNear(static_cast<double>(actual), error_thresh));
}

template <typename T>
-void testTorchConv1DModel()
+void testTorchConv1DModel(double error_thresh = 1e-6)
{
const auto model_file = std::string { RTNEURAL_ROOT_DIR } + "models/conv1d_torch.json";
std::ifstream jsonStream(model_file, std::ifstream::binary);
@@ -43,7 +47,7 @@ void testTorchConv1DModel()
{
for(size_t j = 0; j < outputs[n].size(); ++j)
{
-expectNear(outputs[n + 4][j], expected_y[n][j]);
+expectNear(outputs[n + 4][j], expected_y[n][j], error_thresh);
}
}
}
@@ -58,3 +62,17 @@ TEST(TestTorchConv1D, modelOutputMatchesPythonImplementationForDoubles)
{
testTorchConv1DModel<double>();
}

#if __STDCPP_FLOAT16_T__
TEST(TestTorchConv1D, modelOutputMatchesPythonImplementationForFloat16)
{
testTorchConv1DModel<std::float16_t>(1.0e-3);
}
#endif

#if __STDCPP_BFLOAT16_T__
TEST(TestTorchConv1D, modelOutputMatchesPythonImplementationForBFloat16)
{
testTorchConv1DModel<std::bfloat16_t>(1.0e-2);
}
#endif
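
The pattern above degrades gracefully: <stdfloat> is only included when __cplusplus reports a post-C++20 standard, and each half-precision test is compiled only when the corresponding feature-test macro (__STDCPP_FLOAT16_T__ or __STDCPP_BFLOAT16_T__) is defined, so the existing float and double tests are unaffected on older toolchains. The sketch below is not part of the commit and uses a hypothetical withinThreshold() helper in place of the GoogleTest matchers; it shows the same guards in a standalone program, assuming gcc-13 with -std=c++23.

#include <cmath>
#include <cstdio>
#if __cplusplus > 202002L
#include <stdfloat>
#endif

// Hypothetical stand-in for expectNear(): compare in double precision against a threshold.
template <typename T>
bool withinThreshold(double expected, T actual, double error_thresh)
{
    return std::abs(expected - static_cast<double>(actual)) < error_thresh;
}

int main()
{
#if __STDCPP_FLOAT16_T__
    std::float16_t x { 0.1f16 }; // nearest float16 to 0.1 is off by ~2.4e-5, well under 1e-3
    std::printf("float16 within 1e-3: %d\n", withinThreshold(0.1, x, 1.0e-3));
#endif
#if __STDCPP_BFLOAT16_T__
    std::bfloat16_t y { 0.1bf16 }; // nearest bfloat16 to 0.1 is off by ~9.8e-5, well under 1e-2
    std::printf("bfloat16 within 1e-2: %d\n", withinThreshold(0.1, y, 1.0e-2));
#endif
    return 0;
}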
22 changes: 20 additions & 2 deletions tests/functional/torch_gru_test.cpp
@@ -3,10 +3,14 @@
#include "load_csv.hpp"
#include <RTNeural/RTNeural.h>

#if __cplusplus > 202002L
#include <stdfloat>
#endif

namespace
{
template <typename T>
-void testTorchGRUModel()
+void testTorchGRUModel(double error_thresh = 1.0e-6)
{
using ModelType = RTNeural::ModelT<T, 1, 1,
RTNeural::GRULayerT<T, 1, 8>,
@@ -55,7 +59,7 @@ void testTorchGRUModel()
const auto expected_y = load_csv::loadFile<T>(modelOutputsFile);

using namespace testing;
-EXPECT_THAT(outputs, Pointwise(DoubleNear(1e-6), expected_y));
+EXPECT_THAT(outputs, Pointwise(DoubleNear(error_thresh), expected_y));
}
}

@@ -68,3 +72,17 @@ TEST(TestTorchGRU, modelOutputMatchesPythonImplementationForDoubles)
{
testTorchGRUModel<double>();
}

#if __STDCPP_FLOAT16_T__
TEST(TestTorchGRU, modelOutputMatchesPythonImplementationForFloat16)
{
testTorchGRUModel<std::float16_t>(1.0e-3);
}
#endif

#if __STDCPP_BFLOAT16_T__
TEST(TestTorchGRU, modelOutputMatchesPythonImplementationForBFloat16)
{
testTorchGRUModel<std::bfloat16_t>(1.0e-2);
}
#endif
22 changes: 20 additions & 2 deletions tests/functional/torch_lstm_test.cpp
@@ -3,10 +3,14 @@
#include "load_csv.hpp"
#include <RTNeural/RTNeural.h>

#if __cplusplus > 202002L
#include <stdfloat>
#endif

namespace
{
template <typename T>
-void testTorchLSTMModel()
+void testTorchLSTMModel(double error_thresh = 1.0e-6)
{
using ModelType = RTNeural::ModelT<T, 1, 1,
RTNeural::LSTMLayerT<T, 1, 8>,
@@ -55,7 +59,7 @@ void testTorchLSTMModel()
const auto expected_y = load_csv::loadFile<T>(modelOutputsFile);

using namespace testing;
-EXPECT_THAT(outputs, Pointwise(DoubleNear(1e-6), expected_y));
+EXPECT_THAT(outputs, Pointwise(DoubleNear(error_thresh), expected_y));
}
}

@@ -68,3 +72,17 @@ TEST(TestTorchLSTM, modelOutputMatchesPythonImplementationForDoubles)
{
testTorchLSTMModel<double>();
}

#if __STDCPP_FLOAT16_T__
TEST(TestTorchLSTM, modelOutputMatchesPythonImplementationForFloat16)
{
testTorchLSTMModel<std::float16_t>(1.0e-3);
}
#endif

#if __STDCPP_BFLOAT16_T__
TEST(TestTorchLSTM, modelOutputMatchesPythonImplementationForBFloat16)
{
testTorchLSTMModel<std::bfloat16_t>(1.0e-2);
}
#endif
