Draft: Revisits strict build setting
- Strict build for all projects including tests
  - To reduce bugs in test code
- Introduce an option to disable warnings for external libraries
  - To reduce warning messages from external libraries

ONE-DCO-1.0-Signed-off-by: Hyeongseok Oh <[email protected]>
hseok-oh committed Dec 12, 2024
1 parent c73dd4c commit cd180d1
Showing 81 changed files with 225 additions and 211 deletions.
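
Most of the edits below are mechanical consequences of promoting warnings to errors with -Werror -Wall -Wextra. As a minimal sketch (not taken from this patch), a loop in the following shape builds quietly without those flags but stops a strict build:

#include <cstddef>
#include <vector>

// With -Wall, writing 'int i' here makes 'i < v.size()' a signed/unsigned
// comparison (-Wsign-compare), which -Werror turns into a hard error.
// Using size_t for the index, as the test fixes below do, avoids it.
int sum(const std::vector<int> &v)
{
  int total = 0;
  for (size_t i = 0; i < v.size(); ++i)
    total += v[i];
  return total;
}
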
3 changes: 3 additions & 0 deletions Makefile.template
@@ -83,6 +83,9 @@ ifeq ($(BUILD_TYPE_LC),release)
INSTALL_OPTIONS+= --strip
endif

# Use strict build
OPTIONS+= -DENABLE_STRICT_BUILD=ON

WORKHOME=$(CURDIR)/Product
WORKFOLDER=$(TARGET_ARCH_LC)-$(TARGET_OS).$(BUILD_TYPE_LC)
WORKSPACE=$(WORKHOME)/$(WORKFOLDER)
4 changes: 2 additions & 2 deletions compiler/arser/include/arser/arser.h
@@ -170,12 +170,12 @@ class Argument
public:
explicit Argument(const std::string &arg_name) : _long_name{arg_name}, _names{arg_name} {}
explicit Argument(const std::string &short_name, const std::string &long_name)
: _short_name{short_name}, _long_name{long_name}, _names{short_name, long_name}
: _long_name{long_name}, _short_name{short_name}, _names{short_name, long_name}
{
}
explicit Argument(const std::string &short_name, const std::string &long_name,
const std::vector<std::string> &names)
: _short_name{short_name}, _long_name{long_name}, _names{names}
: _long_name{long_name}, _short_name{short_name}, _names{names}
{
// 'names' must have 'short_name' and 'long_name'.
auto it = std::find(names.begin(), names.end(), short_name);
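
The initializer-list reordering above fixes -Wreorder: data members are initialized in declaration order, not in the order they appear in the list, so a mismatched list draws a warning that a strict build promotes to an error. A minimal sketch of the pattern, using a hypothetical class:

#include <string>

class Arg // hypothetical; mirrors arser's Argument above
{
public:
  // _long_name is declared first, so it is initialized first no matter how
  // the list is written; listing _short_name first triggers -Wreorder.
  Arg(const std::string &s, const std::string &l) : _long_name{l}, _short_name{s} {}

private:
  std::string _long_name;
  std::string _short_name;
};
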
3 changes: 1 addition & 2 deletions compute/ARMComputeEx/CMakeLists.txt
@@ -20,7 +20,6 @@ execute_process (
add_library(arm_compute_ex SHARED ${ACL_EX_SRCS})
target_include_directories(arm_compute_ex PUBLIC ${ACL_EX_BASE})
target_link_libraries(arm_compute_ex PRIVATE arm_compute)
target_link_libraries(arm_compute_ex PRIVATE nnfw_common)
target_link_libraries(arm_compute_ex PRIVATE nnfw_coverage)
# Defines to enable validate check in debug build
target_compile_definitions(arm_compute_ex PRIVATE EMBEDDED_KERNELS
Expand All @@ -29,7 +28,7 @@ target_compile_definitions(arm_compute_ex PRIVATE EMBEDDED_KERNELS
# Validate check functions are not used in release builds
# Some parameters are used only by validate check function calls, so they may be unused in a release build
# Because clang requires "-Wno-unused-parameter -Wno-unused-function" to come after "-Wall",
# this should be after linking nnfw_common and use interface lib linking
# this should be after interface lib linking
add_library(ignore_unused_warning INTERFACE)
target_compile_options(ignore_unused_warning INTERFACE -Wno-unused-parameter -Wno-unused-function)
target_link_libraries(arm_compute_ex PRIVATE $<$<NOT:$<CONFIG:Debug>>:ignore_unused_warning>)
7 changes: 4 additions & 3 deletions compute/cker/src/DepthwiseConv.test.cc
@@ -75,7 +75,7 @@ template <typename T> class DepthwiseConvVerifier
const nnfw::cker::Shape &input_shape, const T *input_data,
const nnfw::cker::Shape &filter_shape, const T *filter_data,
const nnfw::cker::Shape &bias_shape, const T *bias_data,
const nnfw::cker::Shape &output_shape, const T *expected)
const nnfw::cker::Shape &output_shape)
{
std::vector<T> output(output_shape.FlatSize());
EXPECT_ANY_THROW(
@@ -293,11 +293,12 @@ TEST(CKer_Operation, neg_DepthwiseConv)
nnfw::cker::Shape bias_shape{1};
std::vector<float> bias = {0.0};
nnfw::cker::Shape output_shape{1, 3, 3, 1}; // n, h, w, c
std::vector<float> expected = {4.0, 0.0, 3.0, 0.0, 0.0, 0.0, 2.0, 0.0, 1.0};
// Expected output, currently unused - not supported yet
// std::vector<float> expected = {4.0, 0.0, 3.0, 0.0, 0.0, 0.0, 2.0, 0.0, 1.0};

DepthwiseConvVerifier<float> verifier;
verifier.prepare(output_shape, filter_shape);
verifier.checkException(params, input_shape, input.data(), filter_shape, filter.data(),
bias_shape, bias.data(), output_shape, expected.data());
bias_shape, bias.data(), output_shape);
}
}
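
checkException now drops the expected data entirely, since it only asserts that the kernel throws. When a parameter has to stay in a signature, there are lighter ways to satisfy -Wunused-parameter; a short illustration with hypothetical functions:

// Two common fixes when the parameter cannot be removed:
int check(int value, int /*expected*/) { return value; }               // leave it unnamed
int check2(int value, [[maybe_unused]] int expected) { return value; } // C++17 attribute
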
6 changes: 3 additions & 3 deletions compute/cker/src/Range.test.cc
@@ -28,7 +28,7 @@ TEST(CKer_Operation, Range)
std::vector<int> actual(10);
nnfw::cker::Range<int>(&start, &limit, &delta, actual.data());

for (int i = 0; i < actual.size(); i++)
for (size_t i = 0; i < actual.size(); i++)
ASSERT_EQ(actual[i], i);
}

@@ -40,7 +40,7 @@ TEST(CKer_Operation, Range)
std::vector<int> actual(expected.size());
nnfw::cker::Range<int>(&start, &limit, &delta, actual.data());

for (int i = 0; i < actual.size(); i++)
for (size_t i = 0; i < actual.size(); i++)
ASSERT_EQ(actual[i], expected[i]);
}

@@ -52,7 +52,7 @@ TEST(CKer_Operation, Range)
std::vector<float> actual(expected.size());
nnfw::cker::Range<float>(&start, &limit, &delta, actual.data());

for (int i = 0; i < actual.size(); i++)
for (size_t i = 0; i < actual.size(); i++)
ASSERT_FLOAT_EQ(actual[i], expected[i]);
}
}
2 changes: 1 addition & 1 deletion compute/cker/src/train/Adam.test.cc
@@ -72,7 +72,7 @@ template <typename T> class AdamOptimizerVerifier

const T alpha = _learning_rate * std::sqrt(static_cast<T>(1) - beta2_power) /
(static_cast<T>(1) - beta1_power);
for (int i = 0; i < _expected.size(); ++i)
for (size_t i = 0; i < _expected.size(); ++i)
{
T m = _m.at(i);
T v = _v.at(i);
8 changes: 4 additions & 4 deletions compute/cker/src/train/AveragePool.test.cc
@@ -44,8 +44,8 @@ template <typename T> class AvgPoolOpVerifier
void verifyForward(const std::vector<T> input, const std::vector<T> expected_output,
bool expect_eq = true)
{
assert(input.size() == _in_shape.FlatSize());
assert(expected_output.size() == _out_shape.FlatSize());
assert(input.size() == static_cast<size_t>(_in_shape.FlatSize()));
assert(expected_output.size() == static_cast<size_t>(_out_shape.FlatSize()));

std::vector<T> cacluated_output(_out_shape.FlatSize());
nnfw::cker::AveragePool<float>(_op_params, _in_shape, input.data(), _out_shape,
@@ -60,8 +60,8 @@ template <typename T> class AvgPoolOpVerifier
void verifyBackward(const std::vector<T> incoming_data, const std::vector<T> expected_grad_data,
bool expect_eq = true)
{
assert(incoming_data.size() == _out_shape.FlatSize());
assert(expected_grad_data.size() == _in_shape.FlatSize());
assert(incoming_data.size() == static_cast<size_t>(_out_shape.FlatSize()));
assert(expected_grad_data.size() == static_cast<size_t>(_in_shape.FlatSize()));

std::vector<T> calcuated_grad(_in_shape.FlatSize());
nnfw::cker::train::AveragePool2DGrad(_op_params, _out_shape, incoming_data.data(), _in_shape,
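
These casts address -Wsign-compare in asserts: FlatSize() returns a signed value while std::vector::size() returns size_t, so the bare == comparison warns under -Wall. The same pattern in isolation, as a hypothetical helper:

#include <cstddef>
#include <vector>

// Comparing a signed size against size_t warns; cast one side explicitly.
bool sizes_match(const std::vector<float> &data, int flat_size)
{
  return data.size() == static_cast<size_t>(flat_size);
}
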
38 changes: 16 additions & 22 deletions compute/cker/src/train/Loss.test.cc
@@ -37,29 +37,24 @@ template <typename T> class LossCCEVerifier
assert(y_pred.size() == y_true.size());

std::vector<T> output(_out_shape.FlatSize());
const int N = _in_shape.Dims(0);
const int D = _in_shape.FlatSize() / N;

nnfw::cker::train::CategoricalCrossEntropy(_in_shape, y_pred.data(), _in_shape, y_true.data(),
_out_shape, output.data());

// Don't panic if this fails after the kernel implementation or the input is changed.
// The CrossEntropy formula can be computed slightly differently depending on the environment
// because it involves calculations such as log or exp.
for (int i = 0; i < output.size(); ++i)
for (size_t i = 0; i < output.size(); ++i)
{
EXPECT_NEAR(output[i], expected[i], 1e-4f);
}
}

void throwForward(const std::vector<T> &y_pred, const std::vector<T> &y_true,
const std::vector<T> &expected)
void throwForward(const std::vector<T> &y_pred, const std::vector<T> &y_true)
{
assert(y_pred.size() == y_true.size());

std::vector<T> output(_out_shape.FlatSize());
const int N = _in_shape.Dims(0);
const int D = _in_shape.FlatSize() / N;

EXPECT_ANY_THROW(nnfw::cker::train::CategoricalCrossEntropy(
_in_shape, y_pred.data(), _in_shape, y_true.data(), _out_shape, output.data()));
@@ -72,16 +67,14 @@ template <typename T> class LossCCEVerifier
assert(y_pred.size() == y_true.size());

std::vector<T> output(_in_shape.FlatSize());
const int N = _in_shape.Dims(0);
const int D = _in_shape.FlatSize() / N;

nnfw::cker::train::CategoricalCrossEntropyGrad(
_in_shape, y_pred.data(), _in_shape, y_true.data(), _out_shape, output.data(), reduction);

// Don't panic if this fails after the kernel implementation or the input is changed.
// The CrossEntropy gradient formula can be computed slightly differently depending on the
// environment because it involves calculations such as log or exp.
for (int i = 0; i < output.size(); ++i)
for (size_t i = 0; i < output.size(); ++i)
{
EXPECT_NEAR(output[i], expected[i], 1e-4f);
}
@@ -102,33 +95,32 @@ template <typename T> class LossCCEVerifier
y_true.data(), _out_shape, loss_out.data(),
_in_shape, grad.data(), reduction);

for (int i = 0; i < loss_out.size(); ++i)
for (size_t i = 0; i < loss_out.size(); ++i)
{
EXPECT_NEAR(loss_out[i], expected_loss_out[i], 1e-4f);
}

for (int i = 0; i < grad.size(); ++i)
for (size_t i = 0; i < grad.size(); ++i)
{
EXPECT_NEAR(grad[i], expected_grad[i], 1e-4f);
}
}

void throwBackward(const std::vector<T> &y_pred, const std::vector<T> &y_true,
const std::vector<T> &expected, nnfw::cker::train::LossReductionType reduction)
[[maybe_unused]] const std::vector<T> &expected,
nnfw::cker::train::LossReductionType reduction)
{
assert(y_pred.size() == y_true.size());

std::vector<T> output(_out_shape.FlatSize());
const int N = _in_shape.Dims(0);
const int D = _in_shape.FlatSize() / N;

EXPECT_ANY_THROW(nnfw::cker::train::CategoricalCrossEntropyGrad(
_in_shape, y_pred.data(), _in_shape, y_true.data(), _out_shape, output.data(), reduction));
}

void throwBackwardWithLogits(const std::vector<T> &logits, const std::vector<T> &y_true,
const std::vector<T> &expected_loss_out,
const std::vector<T> &expected_grad,
[[maybe_unused]] const std::vector<T> &expected_loss_out,
[[maybe_unused]] const std::vector<T> &expected_grad,
nnfw::cker::train::LossReductionType reduction)
{
assert(logits.size() == y_true.size());
@@ -186,7 +178,7 @@ TEST(CKer_Operation, LossMSE)
nnfw::cker::train::MSE(nnfw::cker::Shape{2, 3}, y_pred.data(), nnfw::cker::Shape{2, 3},
y_true.data(), nnfw::cker::Shape{2}, output.data());

for (int i = 0; i < output.size(); ++i)
for (size_t i = 0; i < output.size(); ++i)
{
EXPECT_FLOAT_EQ(output[i], expected[i]);
}
@@ -204,7 +196,7 @@ TEST(CKer_Operation, LossMSE)
nnfw::cker::train::MSE(nnfw::cker::Shape{2, 3, 4}, y_pred.data(), nnfw::cker::Shape{2, 3, 4},
y_true.data(), nnfw::cker::Shape{2}, output.data());

for (int i = 0; i < output.size(); ++i)
for (size_t i = 0; i < output.size(); ++i)
{
EXPECT_FLOAT_EQ(output[i], expected[i]);
}
@@ -223,7 +215,7 @@ TEST(CKer_Operation, neg_LossMSE)
nnfw::cker::train::MSE(nnfw::cker::Shape{2, 5}, y_pred.data(), nnfw::cker::Shape{2, 5},
y_true.data(), nnfw::cker::Shape{2}, output.data());

for (int i = 0; i < output.size(); ++i)
for (size_t i = 0; i < output.size(); ++i)
{
EXPECT_NE(output[i], expected[i]);
}
@@ -400,10 +392,12 @@ TEST(CKer_Operation, neg_LossCategoricalCrossEntropy)
std::vector<float> y_pred = {-2.86E-12, 2.82E-13, 0.99999845, 2.36E-07, 2.91E-16,
2.10E-07, 1.69E-14, 1.21E-17, 1.08E-06, 6.23E-18};
std::vector<float> y_true = {0, 0, 0, 0, 0, 0, 0, 0, 0, 1};
std::vector<float> expected = {39.617155};

// Expected value, but not used for verification due to exception
// std::vector<float> expected = {39.617155};

LossCCEVerifier<float> verifier(in_shape, out_shape);
verifier.throwForward(y_pred, y_true, expected);
verifier.throwForward(y_pred, y_true);
}
}

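
Two idioms appear in this file: locals that are never read (N, D) are deleted outright, while parameters that existing callers still pass are kept and tagged [[maybe_unused]] (C++17), which suppresses -Wunused-parameter without touching any call site. A compact sketch of the attribute:

#include <vector>

// [[maybe_unused]] documents an intentionally unread argument; the
// signature stays stable for callers while -Wunused-parameter is silenced.
void throw_case([[maybe_unused]] const std::vector<float> &expected)
{
  // only the throwing path is exercised here; 'expected' is kept for symmetry
}
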
8 changes: 4 additions & 4 deletions compute/cker/src/train/MaxPool.test.cc
@@ -47,8 +47,8 @@ template <typename T> class MaxPoolOpVerifier
void verifyForward(const std::vector<T> input, const std::vector<T> expected_output,
bool expect_eq = true)
{
assert(input.size() == _in_shape.FlatSize());
assert(expected_output.size() == _out_shape.FlatSize());
assert(input.size() == static_cast<size_t>(_in_shape.FlatSize()));
assert(expected_output.size() == static_cast<size_t>(_out_shape.FlatSize()));

std::vector<T> cacluated_output(_out_shape.FlatSize());
nnfw::cker::train::MaxPool2D(_op_params, _in_shape, input.data(), _out_shape,
@@ -63,8 +63,8 @@ template <typename T> class MaxPoolOpVerifier
void verifyBackward(const std::vector<T> incoming_data, const std::vector<T> expected_grad_data,
bool expect_eq = true)
{
assert(incoming_data.size() == _out_shape.FlatSize());
assert(expected_grad_data.size() == _in_shape.FlatSize());
assert(incoming_data.size() == static_cast<size_t>(_out_shape.FlatSize()));
assert(expected_grad_data.size() == static_cast<size_t>(_in_shape.FlatSize()));

std::vector<T> calcuated_grad(_in_shape.FlatSize());
nnfw::cker::train::MaxPool2DGrad(_out_shape, incoming_data.data(), _arg_max_index.data(),
8 changes: 4 additions & 4 deletions compute/cker/src/train/Pad.test.cc
@@ -46,8 +46,8 @@ template <typename T> class PadOpVerifier
void verifyForward(const std::vector<T> input, const std::vector<T> expected_output,
bool expect_eq = true)
{
assert(input.size() == _in_shape.FlatSize());
assert(expected_output.size() == _out_shape.FlatSize());
assert(input.size() == static_cast<size_t>(_in_shape.FlatSize()));
assert(expected_output.size() == static_cast<size_t>(_out_shape.FlatSize()));

std::vector<T> cacluated_output(_out_shape.FlatSize());
nnfw::cker::Pad(_op_params.data, _op_params.rank, _in_shape, input.data(), _out_shape,
@@ -62,8 +62,8 @@ template <typename T> class PadOpVerifier
void verifyBackward(const std::vector<T> backward_output,
const std::vector<T> expected_backward_input, bool expect_eq = true)
{
assert(backward_output.size() == _out_shape.FlatSize());
assert(expected_backward_input.size() == _in_shape.FlatSize());
assert(backward_output.size() == static_cast<size_t>(_out_shape.FlatSize()));
assert(expected_backward_input.size() == static_cast<size_t>(_in_shape.FlatSize()));

std::vector<T> backward_input(_in_shape.FlatSize());
nnfw::cker::train::Depad(_op_params.data, _op_params.rank, _out_shape, backward_output.data(),
6 changes: 3 additions & 3 deletions compute/cker/src/train/ReduceMean.test.cc
@@ -39,8 +39,8 @@ template <typename T> class ReduceMeanVerifier
void verifyForward(const std::vector<T> &input, const std::vector<T> &expected,
bool expect_eq = true)
{
assert(input.size() == _in_shape.FlatSize());
assert(expected.size() == _out_shape.FlatSize());
assert(input.size() == static_cast<size_t>(_in_shape.FlatSize()));
assert(expected.size() == static_cast<size_t>(_out_shape.FlatSize()));

std::vector<T> output(_out_shape.FlatSize());

@@ -69,7 +69,7 @@ template <typename T> class ReduceMeanVerifier
if (expect_eq)
{
// consider the floating point error
for (int i = 0; i < grad.size(); ++i)
for (size_t i = 0; i < grad.size(); ++i)
{
EXPECT_NEAR(grad[i], expected[i], 1e-3f);
}
2 changes: 1 addition & 1 deletion compute/cker/src/train/SGD.test.cc
@@ -55,7 +55,7 @@ template <typename T> class SGDOptimizerVerifier
{
assert(_expected.size() == _gradient.size());

for (int i = 0; i < _expected.size(); ++i)
for (size_t i = 0; i < _expected.size(); ++i)
{
T g = _gradient.at(i);
T &var = _expected.at(i);
5 changes: 5 additions & 0 deletions infra/cmake/modules/ExternalProjectTools.cmake
@@ -1,4 +1,9 @@
macro(add_extdirectory DIR TAG)
# Disable warning messages from external source code
if(IGNORE_EXTERNAL_WARNINGS)
add_compile_options(-w)
endif(IGNORE_EXTERNAL_WARNINGS)

cmake_parse_arguments(ARG "EXCLUDE_FROM_ALL" "" "" ${ARGN})
if(ARG_EXCLUDE_FROM_ALL)
add_subdirectory(${DIR} "${CMAKE_BINARY_DIR}/externals/${TAG}" EXCLUDE_FROM_ALL)
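
The -w switch inhibits all warning diagnostics for the sources it covers, which is why a single add_compile_options(-w) is enough to keep -Werror builds green across third-party code. A hypothetical external snippet that would otherwise fail under -Wextra -Werror:

// Hypothetical third-party code: 'flags' is never read, which -Wextra
// reports as -Wunused-parameter; compiled with -w it builds silently.
extern "C" int legacy_init(void *ctx, int flags)
{
  return ctx != nullptr ? 0 : -1;
}
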
2 changes: 1 addition & 1 deletion infra/debian/runtime/rules
@@ -18,7 +18,7 @@ override_dh_auto_build:
mkdir -p $(NNFW_WORKSPACE)
./nnfw configure -DCMAKE_BUILD_TYPE=Release -DEXTERNALS_BUILD_THREADS=$(NPROC) \
-DDOWNLOAD_GTEST=OFF -DENABLE_TEST=OFF \
-DBUILD_PYTHON_BINDING=OFF
-DBUILD_PYTHON_BINDING=OFF -DENABLE_STRICT_BUILD=ON
./nnfw build -j$(NPROC)
override_dh_auto_install:
./nnfw install --prefix $(NNFW_INSTALL_PREFIX) --strip
14 changes: 7 additions & 7 deletions infra/nnfw/CMakeLists.txt
@@ -95,16 +95,16 @@ if(${ENABLE_COVERAGE} AND NOT ${ENABLE_TEST})
message(FATAL_ERROR "Test should be enabled to measure test coverage")
endif(${ENABLE_COVERAGE} AND NOT ${ENABLE_TEST})

add_library(nnfw_common INTERFACE)
if(ENABLE_STRICT_BUILD)
target_compile_options(nnfw_common INTERFACE -Werror -Wall -Wextra)
add_compile_options(-Werror -Wall -Wextra)
endif(ENABLE_STRICT_BUILD)

macro(nnfw_strict_build TARGET)
if(ENABLE_STRICT_BUILD)
target_compile_options(${TARGET} PRIVATE -Werror -Wall -Wextra)
endif(ENABLE_STRICT_BUILD)
endmacro(nnfw_strict_build)
# Ease strict build mode by disabling all warning messages for specific targets
# (e.g. 3rd-party libraries)
add_library(nnfw_ease_warning INTERFACE)
if(ENABLE_STRICT_BUILD)
target_compile_options(nnfw_ease_warning INTERFACE -w)
endif(ENABLE_STRICT_BUILD)

# TODO Replace using default build option setting in cmake/buildtool/config/config_linux.cmake
# to link nnfw_coverage on each module which want to check coverage
3 changes: 2 additions & 1 deletion infra/nnfw/cmake/CfgOptionFlags.cmake
@@ -10,7 +10,8 @@ include("cmake/options/options_${TARGET_PLATFORM}.cmake")
#
# Default build configuration for project
#
option(ENABLE_STRICT_BUILD "Treat warning as error" ON)
option(ENABLE_STRICT_BUILD "Treat warning as error" OFF)
option(IGNORE_EXTERNAL_WARNINGS "Ignore warnings from external libraries" ON)
option(ENABLE_COVERAGE "Build for coverage test" OFF)
option(BUILD_EXT_MULTITHREAD "Build external build using multi thread" ON)
option(BUILD_ONERT "Build onert" ON)
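
Note that the default flips from ON to OFF here: strict mode becomes opt-in at configure time. Builds that want it pass -DENABLE_STRICT_BUILD=ON explicitly (see Makefile.template and infra/debian/runtime/rules above), while IGNORE_EXTERNAL_WARNINGS defaults to ON so external code stays quiet either way.
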
2 changes: 2 additions & 0 deletions infra/nnfw/cmake/packages/OouraFFTConfig.cmake
@@ -12,6 +12,8 @@ function(_OouraFFT_build)
${OouraFFTSource_DIR}/fftsg2d.c
${OouraFFTSource_DIR}/fftsg.c
)
# Ignore strict build warnings
target_link_libraries(fft2d_fftsg2d PRIVATE nnfw_ease_warning)
add_library(oourafft::fftsg2d ALIAS fft2d_fftsg2d)
endif(NOT TARGET oourafft::fftsg2d)

2 changes: 0 additions & 2 deletions infra/nnfw/cmake/packages/PthreadpoolConfig.cmake
@@ -22,8 +22,6 @@ function(_Pthreadpool_Build)

add_extdirectory("${PthreadpoolSource_DIR}" PTHREADPOOL EXCLUDE_FROM_ALL)
set_target_properties(pthreadpool PROPERTIES POSITION_INDEPENDENT_CODE ON)
# Suppress warnings generated by pthreadpool
set_target_properties(pthreadpool PROPERTIES COMPILE_FLAGS "-Wno-deprecated-declarations")
set(PthreadpoolSource_DIR ${PthreadpoolSource_DIR} PARENT_SCOPE)
set(Pthreadpool_FOUND TRUE PARENT_SCOPE)
endfunction(_Pthreadpool_Build)
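
The per-target -Wno-deprecated-declarations workaround is now redundant: pthreadpool is pulled in through add_extdirectory, which compiles external sources with -w whenever IGNORE_EXTERNAL_WARNINGS is ON (see ExternalProjectTools.cmake above).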