diff --git a/Makefile.template b/Makefile.template
index 4a93c1acf3d..781466a8b95 100644
--- a/Makefile.template
+++ b/Makefile.template
@@ -83,6 +83,9 @@ ifeq ($(BUILD_TYPE_LC),release)
   INSTALL_OPTIONS+= --strip
 endif
 
+# Use strict build
+OPTIONS+= -DENABLE_STRICT_BUILD=ON
+
 WORKHOME=$(CURDIR)/Product
 WORKFOLDER=$(TARGET_ARCH_LC)-$(TARGET_OS).$(BUILD_TYPE_LC)
 WORKSPACE=$(WORKHOME)/$(WORKFOLDER)
diff --git a/compiler/arser/include/arser/arser.h b/compiler/arser/include/arser/arser.h
index 6e071545de8..0128f915a9f 100644
--- a/compiler/arser/include/arser/arser.h
+++ b/compiler/arser/include/arser/arser.h
@@ -170,12 +170,12 @@ class Argument
 public:
   explicit Argument(const std::string &arg_name) : _long_name{arg_name}, _names{arg_name} {}
   explicit Argument(const std::string &short_name, const std::string &long_name)
-    : _short_name{short_name}, _long_name{long_name}, _names{short_name, long_name}
+    : _long_name{long_name}, _short_name{short_name}, _names{short_name, long_name}
   {
   }
   explicit Argument(const std::string &short_name, const std::string &long_name,
                     const std::vector<std::string> &names)
-    : _short_name{short_name}, _long_name{long_name}, _names{names}
+    : _long_name{long_name}, _short_name{short_name}, _names{names}
   {
     // 'names' must have 'short_name' and 'long_name'.
     auto it = std::find(names.begin(), names.end(), short_name);
diff --git a/compute/ARMComputeEx/CMakeLists.txt b/compute/ARMComputeEx/CMakeLists.txt
index ee23cacf000..bc35b160e51 100644
--- a/compute/ARMComputeEx/CMakeLists.txt
+++ b/compute/ARMComputeEx/CMakeLists.txt
@@ -20,7 +20,6 @@ execute_process (
 add_library(arm_compute_ex SHARED ${ACL_EX_SRCS})
 target_include_directories(arm_compute_ex PUBLIC ${ACL_EX_BASE})
 target_link_libraries(arm_compute_ex PRIVATE arm_compute)
-target_link_libraries(arm_compute_ex PRIVATE nnfw_common)
 target_link_libraries(arm_compute_ex PRIVATE nnfw_coverage)
 # Defines to enable validate check in debug build
 target_compile_definitions(arm_compute_ex PRIVATE EMBEDDED_KERNELS
@@ -29,7 +28,7 @@ target_compile_definitions(arm_compute_ex PRIVATE EMBEDDED_KERNELS
 # Validate check functions are not used on release build
 # Some parameter are used for validate check function call, and these parameter may not used on release build
 # Because clang requires to add "-Wno-unused-parameter -Wno-unused-function" after "-Wall",
-# this should be after linking nnfw_common and use interface lib linking
+# this should be after interface lib linking
 add_library(ignore_unused_warning INTERFACE)
 target_compile_options(ignore_unused_warning INTERFACE -Wno-unused-parameter -Wno-unused-function)
 target_link_libraries(arm_compute_ex PRIVATE $<$<NOT:$<CONFIG:Debug>>:ignore_unused_warning>)
diff --git a/compute/cker/src/DepthwiseConv.test.cc b/compute/cker/src/DepthwiseConv.test.cc
index ecb7c97db97..25fafa76dca 100644
--- a/compute/cker/src/DepthwiseConv.test.cc
+++ b/compute/cker/src/DepthwiseConv.test.cc
@@ -75,7 +75,7 @@ template <typename T> class DepthwiseConvVerifier
                       const nnfw::cker::Shape &input_shape, const T *input_data,
                       const nnfw::cker::Shape &filter_shape, const T *filter_data,
                       const nnfw::cker::Shape &bias_shape, const T *bias_data,
-                      const nnfw::cker::Shape &output_shape, const T *expected)
+                      const nnfw::cker::Shape &output_shape)
   {
     std::vector<T> output(output_shape.FlatSize());
     EXPECT_ANY_THROW(
@@ -293,11 +293,12 @@ TEST(CKer_Operation, neg_DepthwiseConv)
     nnfw::cker::Shape bias_shape{1};
     std::vector<float> bias = {0.0};
     nnfw::cker::Shape output_shape{1, 3, 3, 1}; // n, h, w, c
-    std::vector<float> expected = {4.0, 0.0, 3.0, 0.0, 0.0, 0.0, 2.0, 0.0, 1.0};
+
+    // Expected output but not used - not supported yet
+    // std::vector<float> expected = {4.0, 0.0, 3.0, 0.0, 0.0, 0.0, 2.0, 0.0, 1.0};
 
     DepthwiseConvVerifier<float> verifier;
     verifier.prepare(output_shape, filter_shape);
     verifier.checkException(params, input_shape, input.data(), filter_shape, filter.data(),
-                            bias_shape, bias.data(), output_shape, expected.data());
+                            bias_shape, bias.data(), output_shape);
   }
 }
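
A note on the arser constructor changes above: C++ initializes non-static members in declaration order, not in the order the mem-initializer list is written, so a list written out of order draws -Wreorder from -Wall and becomes a hard error once -Werror is on. A minimal sketch of the pattern (illustrative names only, not code from this patch):

    #include <string>

    class Example
    {
    public:
      // _long_name is declared before _short_name below, so it must also come
      // first in the initializer list; swapping the two triggers -Wreorder.
      Example(const std::string &s, const std::string &l) : _long_name{l}, _short_name{s} {}

    private:
      std::string _long_name;  // initialized first, whatever the list says
      std::string _short_name; // initialized second
    };

    int main() { Example e{"-h", "--help"}; }

The MockUpTensor and MemoryPoller constructors later in this patch get the same treatment.
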
diff --git a/compute/cker/src/Range.test.cc b/compute/cker/src/Range.test.cc
index e5fe4801f68..b1059aa5cd2 100644
--- a/compute/cker/src/Range.test.cc
+++ b/compute/cker/src/Range.test.cc
@@ -28,7 +28,7 @@ TEST(CKer_Operation, Range)
     std::vector<int> actual(10);
 
     nnfw::cker::Range(&start, &limit, &delta, actual.data());
 
-    for (int i = 0; i < actual.size(); i++)
+    for (size_t i = 0; i < actual.size(); i++)
       ASSERT_EQ(actual[i], i);
   }
 
@@ -40,7 +40,7 @@ TEST(CKer_Operation, Range)
     std::vector<int> actual(expected.size());
 
     nnfw::cker::Range(&start, &limit, &delta, actual.data());
 
-    for (int i = 0; i < actual.size(); i++)
+    for (size_t i = 0; i < actual.size(); i++)
       ASSERT_EQ(actual[i], expected[i]);
   }
 
@@ -52,7 +52,7 @@ TEST(CKer_Operation, Range)
     std::vector<float> actual(expected.size());
 
     nnfw::cker::Range(&start, &limit, &delta, actual.data());
 
-    for (int i = 0; i < actual.size(); i++)
+    for (size_t i = 0; i < actual.size(); i++)
       ASSERT_FLOAT_EQ(actual[i], expected[i]);
   }
 }
diff --git a/compute/cker/src/train/Adam.test.cc b/compute/cker/src/train/Adam.test.cc
index 0d0effe30e3..180028f94a7 100644
--- a/compute/cker/src/train/Adam.test.cc
+++ b/compute/cker/src/train/Adam.test.cc
@@ -72,7 +72,7 @@ template <typename T> class AdamOptimizerVerifier
     const T alpha = _learning_rate * std::sqrt(static_cast<T>(1) - beta2_power) /
                     (static_cast<T>(1) - beta1_power);
 
-    for (int i = 0; i < _expected.size(); ++i)
+    for (size_t i = 0; i < _expected.size(); ++i)
     {
       T m = _m.at(i);
       T v = _v.at(i);
diff --git a/compute/cker/src/train/AveragePool.test.cc b/compute/cker/src/train/AveragePool.test.cc
index 51be5e5edd7..562e9498a8b 100644
--- a/compute/cker/src/train/AveragePool.test.cc
+++ b/compute/cker/src/train/AveragePool.test.cc
@@ -44,8 +44,8 @@ template <typename T> class AvgPoolOpVerifier
   void verifyForward(const std::vector<T> input, const std::vector<T> expected_output,
                      bool expect_eq = true)
   {
-    assert(input.size() == _in_shape.FlatSize());
-    assert(expected_output.size() == _out_shape.FlatSize());
+    assert(input.size() == static_cast<size_t>(_in_shape.FlatSize()));
+    assert(expected_output.size() == static_cast<size_t>(_out_shape.FlatSize()));
 
     std::vector<T> cacluated_output(_out_shape.FlatSize());
     nnfw::cker::AveragePool(_op_params, _in_shape, input.data(), _out_shape,
@@ -60,8 +60,8 @@ template <typename T> class AvgPoolOpVerifier
   void verifyBackward(const std::vector<T> incoming_data,
                       const std::vector<T> expected_grad_data, bool expect_eq = true)
   {
-    assert(incoming_data.size() == _out_shape.FlatSize());
-    assert(expected_grad_data.size() == _in_shape.FlatSize());
+    assert(incoming_data.size() == static_cast<size_t>(_out_shape.FlatSize()));
+    assert(expected_grad_data.size() == static_cast<size_t>(_in_shape.FlatSize()));
 
     std::vector<T> calcuated_grad(_in_shape.FlatSize());
     nnfw::cker::train::AveragePool2DGrad(_op_params, _out_shape, incoming_data.data(), _in_shape,
diff --git a/compute/cker/src/train/Loss.test.cc b/compute/cker/src/train/Loss.test.cc
index ff894a5a74f..323609907b1 100644
--- a/compute/cker/src/train/Loss.test.cc
+++ b/compute/cker/src/train/Loss.test.cc
@@ -37,8 +37,6 @@ template <typename T> class LossCCEVerifier
     assert(y_pred.size() == y_true.size());
 
     std::vector<T> output(_out_shape.FlatSize());
-    const int N = _in_shape.Dims(0);
-    const int D = _in_shape.FlatSize() / N;
 
     nnfw::cker::train::CategoricalCrossEntropy(_in_shape, y_pred.data(), _in_shape, y_true.data(),
                                                _out_shape, output.data());
@@ -46,20 +44,17 @@ template <typename T> class LossCCEVerifier
     // Don't be panic when it fails after kernel implementation or input is changed.
     // CrossEntropy formula can be calculated slightly differently depending on the environment
     // because it involes calculations such as log or exp.
-    for (int i = 0; i < output.size(); ++i)
+    for (size_t i = 0; i < output.size(); ++i)
     {
       EXPECT_NEAR(output[i], expected[i], 1e-4f);
     }
   }
 
-  void throwForward(const std::vector<T> &y_pred, const std::vector<T> &y_true,
-                    const std::vector<T> &expected)
+  void throwForward(const std::vector<T> &y_pred, const std::vector<T> &y_true)
   {
     assert(y_pred.size() == y_true.size());
 
     std::vector<T> output(_out_shape.FlatSize());
-    const int N = _in_shape.Dims(0);
-    const int D = _in_shape.FlatSize() / N;
 
     EXPECT_ANY_THROW(nnfw::cker::train::CategoricalCrossEntropy(
       _in_shape, y_pred.data(), _in_shape, y_true.data(), _out_shape, output.data()));
@@ -72,8 +67,6 @@ template <typename T> class LossCCEVerifier
     assert(y_pred.size() == y_true.size());
 
     std::vector<T> output(_in_shape.FlatSize());
-    const int N = _in_shape.Dims(0);
-    const int D = _in_shape.FlatSize() / N;
 
     nnfw::cker::train::CategoricalCrossEntropyGrad(
       _in_shape, y_pred.data(), _in_shape, y_true.data(), _out_shape, output.data(), reduction);
@@ -81,7 +74,7 @@ template <typename T> class LossCCEVerifier
     // Don't be panic when it fails after kernel implementation or input is changed.
     // CrossEntropy Gradient formula can be calculated slightly differently depending on the
     // environment because it involes calculations such as log or exp.
-    for (int i = 0; i < output.size(); ++i)
+    for (size_t i = 0; i < output.size(); ++i)
     {
       EXPECT_NEAR(output[i], expected[i], 1e-4f);
     }
@@ -102,33 +95,32 @@ template <typename T> class LossCCEVerifier
                                                          y_true.data(), _out_shape, loss_out.data(),
                                                          _in_shape, grad.data(), reduction);
 
-    for (int i = 0; i < loss_out.size(); ++i)
+    for (size_t i = 0; i < loss_out.size(); ++i)
     {
       EXPECT_NEAR(loss_out[i], expected_loss_out[i], 1e-4f);
     }
 
-    for (int i = 0; i < grad.size(); ++i)
+    for (size_t i = 0; i < grad.size(); ++i)
     {
       EXPECT_NEAR(grad[i], expected_grad[i], 1e-4f);
     }
   }
 
   void throwBackward(const std::vector<T> &y_pred, const std::vector<T> &y_true,
-                     const std::vector<T> &expected, nnfw::cker::train::LossReductionType reduction)
+                     [[maybe_unused]] const std::vector<T> &expected,
+                     nnfw::cker::train::LossReductionType reduction)
   {
     assert(y_pred.size() == y_true.size());
 
     std::vector<T> output(_out_shape.FlatSize());
-    const int N = _in_shape.Dims(0);
-    const int D = _in_shape.FlatSize() / N;
 
     EXPECT_ANY_THROW(nnfw::cker::train::CategoricalCrossEntropyGrad(
       _in_shape, y_pred.data(), _in_shape, y_true.data(), _out_shape, output.data(), reduction));
   }
 
   void throwBackwardWithLogits(const std::vector<T> &logits, const std::vector<T> &y_true,
-                               const std::vector<T> &expected_loss_out,
-                               const std::vector<T> &expected_grad,
+                               [[maybe_unused]] const std::vector<T> &expected_loss_out,
+                               [[maybe_unused]] const std::vector<T> &expected_grad,
                                nnfw::cker::train::LossReductionType reduction)
   {
     assert(logits.size() == y_true.size());
@@ -186,7 +178,7 @@ TEST(CKer_Operation, LossMSE)
     nnfw::cker::train::MSE(nnfw::cker::Shape{2, 3}, y_pred.data(), nnfw::cker::Shape{2, 3},
                            y_true.data(), nnfw::cker::Shape{2}, output.data());
 
-    for (int i = 0; i < output.size(); ++i)
+    for (size_t i = 0; i < output.size(); ++i)
    {
       EXPECT_FLOAT_EQ(output[i], expected[i]);
     }
@@ -204,7 +196,7 @@ TEST(CKer_Operation, LossMSE)
     nnfw::cker::train::MSE(nnfw::cker::Shape{2, 3, 4}, y_pred.data(), nnfw::cker::Shape{2, 3, 4},
                            y_true.data(), nnfw::cker::Shape{2}, output.data());
 
-    for (int i = 0; i < output.size(); ++i)
+    for (size_t i = 0; i < output.size(); ++i)
     {
       EXPECT_FLOAT_EQ(output[i], expected[i]);
     }
@@ -223,7 +215,7 @@ TEST(CKer_Operation, neg_LossMSE)
     nnfw::cker::train::MSE(nnfw::cker::Shape{2, 5}, y_pred.data(), nnfw::cker::Shape{2, 5},
                            y_true.data(), nnfw::cker::Shape{2}, output.data());
 
-    for (int i = 0; i < output.size(); ++i)
+    for (size_t i = 0; i < output.size(); ++i)
     {
       EXPECT_NE(output[i], expected[i]);
     }
@@ -400,10 +392,12 @@ TEST(CKer_Operation, neg_LossCategoricalCrossEntropy)
     std::vector<float> y_pred = {-2.86E-12, 2.82E-13, 0.99999845, 2.36E-07, 2.91E-16,
                                  2.10E-07,  1.69E-14, 1.21E-17,   1.08E-06, 6.23E-18};
     std::vector<float> y_true = {0, 0, 0, 0, 0, 0, 0, 0, 0, 1};
-    std::vector<float> expected = {39.617155};
+
+    // Expected value, but not used for verification due to exception
+    // std::vector<float> expected = {39.617155};
 
     LossCCEVerifier<float> verifier(in_shape, out_shape);
-    verifier.throwForward(y_pred, y_true, expected);
+    verifier.throwForward(y_pred, y_true);
   }
 }
diff --git a/compute/cker/src/train/MaxPool.test.cc b/compute/cker/src/train/MaxPool.test.cc
index b408a230c50..e3770c82209 100644
--- a/compute/cker/src/train/MaxPool.test.cc
+++ b/compute/cker/src/train/MaxPool.test.cc
@@ -47,8 +47,8 @@ template <typename T> class MaxPoolOpVerifier
   void verifyForward(const std::vector<T> input, const std::vector<T> expected_output,
                      bool expect_eq = true)
   {
-    assert(input.size() == _in_shape.FlatSize());
-    assert(expected_output.size() == _out_shape.FlatSize());
+    assert(input.size() == static_cast<size_t>(_in_shape.FlatSize()));
+    assert(expected_output.size() == static_cast<size_t>(_out_shape.FlatSize()));
 
     std::vector<T> cacluated_output(_out_shape.FlatSize());
     nnfw::cker::train::MaxPool2D(_op_params, _in_shape, input.data(), _out_shape,
@@ -63,8 +63,8 @@ template <typename T> class MaxPoolOpVerifier
   void verifyBackward(const std::vector<T> incoming_data,
                       const std::vector<T> expected_grad_data, bool expect_eq = true)
   {
-    assert(incoming_data.size() == _out_shape.FlatSize());
-    assert(expected_grad_data.size() == _in_shape.FlatSize());
+    assert(incoming_data.size() == static_cast<size_t>(_out_shape.FlatSize()));
+    assert(expected_grad_data.size() == static_cast<size_t>(_in_shape.FlatSize()));
 
     std::vector<T> calcuated_grad(_in_shape.FlatSize());
     nnfw::cker::train::MaxPool2DGrad(_out_shape, incoming_data.data(), _arg_max_index.data(),
diff --git a/compute/cker/src/train/Pad.test.cc b/compute/cker/src/train/Pad.test.cc
index 269534f12fa..aa0e8ba47d7 100644
--- a/compute/cker/src/train/Pad.test.cc
+++ b/compute/cker/src/train/Pad.test.cc
@@ -46,8 +46,8 @@ template <typename T> class PadOpVerifier
   void verifyForward(const std::vector<T> input, const std::vector<T> expected_output,
                      bool expect_eq = true)
   {
-    assert(input.size() == _in_shape.FlatSize());
-    assert(expected_output.size() == _out_shape.FlatSize());
+    assert(input.size() == static_cast<size_t>(_in_shape.FlatSize()));
+    assert(expected_output.size() == static_cast<size_t>(_out_shape.FlatSize()));
 
     std::vector<T> cacluated_output(_out_shape.FlatSize());
     nnfw::cker::Pad(_op_params.data, _op_params.rank, _in_shape, input.data(), _out_shape,
@@ -62,8 +62,8 @@ template <typename T> class PadOpVerifier
   void verifyBackward(const std::vector<T> backward_output,
                       const std::vector<T> expected_backward_input, bool expect_eq = true)
   {
-    assert(backward_output.size() == _out_shape.FlatSize());
-    assert(expected_backward_input.size() == _in_shape.FlatSize());
+    assert(backward_output.size() == static_cast<size_t>(_out_shape.FlatSize()));
+    assert(expected_backward_input.size() == static_cast<size_t>(_in_shape.FlatSize()));
 
     std::vector<T> backward_input(_in_shape.FlatSize());
     nnfw::cker::train::Depad(_op_params.data, _op_params.rank, _out_shape, backward_output.data(),
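
A note on the loop-index changes in the cker tests above and below: std::vector::size() returns an unsigned type, so comparing it against a signed int draws -Wsign-compare, and Shape::FlatSize() is signed, so comparing it against size() needs an explicit cast. A minimal sketch (illustrative only, assuming a signed FlatSize-like value; not code from this patch):

    #include <cassert>
    #include <vector>

    int main()
    {
      std::vector<float> output(9);

      // for (int i = 0; i < output.size(); ++i)   // -Wsign-compare under -Wall
      for (size_t i = 0; i < output.size(); ++i) // clean under -Wall -Wextra -Werror
        output[i] = 0.0f;

      // Same idea for asserts mixing size() with a signed element count:
      int flat_size = 9; // stand-in for a signed Shape::FlatSize()
      assert(output.size() == static_cast<size_t>(flat_size));
      return 0;
    }
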
diff --git a/compute/cker/src/train/ReduceMean.test.cc b/compute/cker/src/train/ReduceMean.test.cc
index 856f6e903de..03a154485e6 100644
--- a/compute/cker/src/train/ReduceMean.test.cc
+++ b/compute/cker/src/train/ReduceMean.test.cc
@@ -39,8 +39,8 @@ template <typename T> class ReduceMeanVerifier
   void verifyForward(const std::vector<T> &input, const std::vector<T> &expected,
                      bool expect_eq = true)
   {
-    assert(input.size() == _in_shape.FlatSize());
-    assert(expected.size() == _out_shape.FlatSize());
+    assert(input.size() == static_cast<size_t>(_in_shape.FlatSize()));
+    assert(expected.size() == static_cast<size_t>(_out_shape.FlatSize()));
 
     std::vector<T> output(_out_shape.FlatSize());
 
@@ -69,7 +69,7 @@ template <typename T> class ReduceMeanVerifier
     if (expect_eq)
     {
       // consider the floating point error
-      for (int i = 0; i < grad.size(); ++i)
+      for (size_t i = 0; i < grad.size(); ++i)
       {
         EXPECT_NEAR(grad[i], expected[i], 1e-3f);
       }
diff --git a/compute/cker/src/train/SGD.test.cc b/compute/cker/src/train/SGD.test.cc
index 0a819b33633..627061f40bb 100644
--- a/compute/cker/src/train/SGD.test.cc
+++ b/compute/cker/src/train/SGD.test.cc
@@ -55,7 +55,7 @@ template <typename T> class SGDOptimizerVerifier
   {
     assert(_expected.size() == _gradient.size());
 
-    for (int i = 0; i < _expected.size(); ++i)
+    for (size_t i = 0; i < _expected.size(); ++i)
     {
       T g = _gradient.at(i);
       T &var = _expected.at(i);
diff --git a/infra/cmake/modules/ExternalProjectTools.cmake b/infra/cmake/modules/ExternalProjectTools.cmake
index afa290c3ea4..56c02cd62e5 100644
--- a/infra/cmake/modules/ExternalProjectTools.cmake
+++ b/infra/cmake/modules/ExternalProjectTools.cmake
@@ -1,4 +1,9 @@
 macro(add_extdirectory DIR TAG)
+  # Disable warning messages from external source code
+  if(DISABLE_EXTERNAL_WARNING)
+    add_compile_options(-w)
+  endif(DISABLE_EXTERNAL_WARNING)
+
   cmake_parse_arguments(ARG "EXCLUDE_FROM_ALL" "" "" ${ARGN})
   if(ARG_EXCLUDE_FROM_ALL)
     add_subdirectory(${DIR} "${CMAKE_BINARY_DIR}/externals/${TAG}" EXCLUDE_FROM_ALL)
diff --git a/infra/debian/runtime/rules b/infra/debian/runtime/rules
index 42c66cc56bb..b66c8b6f448 100755
--- a/infra/debian/runtime/rules
+++ b/infra/debian/runtime/rules
@@ -18,7 +18,7 @@ override_dh_auto_build:
 	mkdir -p $(NNFW_WORKSPACE)
 	./nnfw configure -DCMAKE_BUILD_TYPE=Release -DEXTERNALS_BUILD_THREADS=$(NPROC) \
 		-DDOWNLOAD_GTEST=OFF -DENABLE_TEST=OFF \
-		-DBUILD_PYTHON_BINDING=OFF
+		-DBUILD_PYTHON_BINDING=OFF -DENABLE_STRICT_BUILD=ON
 	./nnfw build -j$(NPROC)
 override_dh_auto_install:
 	./nnfw install --prefix $(NNFW_INSTALL_PREFIX) --strip
diff --git a/infra/nnfw/CMakeLists.txt b/infra/nnfw/CMakeLists.txt
index 2c3a30a3616..427a103bbd2 100644
--- a/infra/nnfw/CMakeLists.txt
+++ b/infra/nnfw/CMakeLists.txt
@@ -95,16 +95,16 @@ if(${ENABLE_COVERAGE} AND NOT ${ENABLE_TEST})
   message(FATAL_ERROR "Test should be enabled to measure test coverage")
 endif(${ENABLE_COVERAGE} AND NOT ${ENABLE_TEST})
 
-add_library(nnfw_common INTERFACE)
 if(ENABLE_STRICT_BUILD)
-  target_compile_options(nnfw_common INTERFACE -Werror -Wall -Wextra)
+  add_compile_options(-Werror -Wall -Wextra)
 endif(ENABLE_STRICT_BUILD)
 
-macro(nnfw_strict_build TARGET)
-  if(ENABLE_STRICT_BUILD)
-    target_compile_options(${TARGET} PRIVATE -Werror -Wall -Wextra)
-  endif(ENABLE_STRICT_BUILD)
-endmacro(nnfw_strict_build)
+# Ease build by disabling all warning messages for strict build mode for specific targets
+# (ex. 3rd-party libraries)
+add_library(nnfw_ease_warning INTERFACE)
+if(ENABLE_STRICT_BUILD)
+  target_compile_options(nnfw_ease_warning INTERFACE -w)
+endif(ENABLE_STRICT_BUILD)
 
 # TODO Replace using default build option setting in cmake/buildtool/config/config_linux.cmake
 # to link nnfw_coverage on each module which want to check coverage
diff --git a/infra/nnfw/cmake/CfgOptionFlags.cmake b/infra/nnfw/cmake/CfgOptionFlags.cmake
index bb1db0d2869..392c198792f 100644
--- a/infra/nnfw/cmake/CfgOptionFlags.cmake
+++ b/infra/nnfw/cmake/CfgOptionFlags.cmake
@@ -10,7 +10,8 @@ include("cmake/options/options_${TARGET_PLATFORM}.cmake")
 #
 # Default build configuration for project
 #
-option(ENABLE_STRICT_BUILD "Treat warning as error" ON)
+option(ENABLE_STRICT_BUILD "Treat warning as error" OFF)
+option(DISABLE_EXTERNAL_WARNING "Ignore warnings from external libraries" ON)
 option(ENABLE_COVERAGE "Build for coverage test" OFF)
 option(BUILD_EXT_MULTITHREAD "Build external build using multi thread" ON)
 option(BUILD_ONERT "Build onert" ON)
diff --git a/infra/nnfw/cmake/packages/OouraFFTConfig.cmake b/infra/nnfw/cmake/packages/OouraFFTConfig.cmake
index ad226a13b75..ed5e85f55f0 100644
--- a/infra/nnfw/cmake/packages/OouraFFTConfig.cmake
+++ b/infra/nnfw/cmake/packages/OouraFFTConfig.cmake
@@ -12,6 +12,8 @@ function(_OouraFFT_build)
       ${OouraFFTSource_DIR}/fftsg2d.c
       ${OouraFFTSource_DIR}/fftsg.c
     )
+    # Ignore strict build warnings
+    target_link_libraries(fft2d_fftsg2d PRIVATE nnfw_ease_warning)
     add_library(oourafft::fftsg2d ALIAS fft2d_fftsg2d)
   endif(NOT TARGET oourafft::fftsg2d)
diff --git a/infra/nnfw/cmake/packages/PthreadpoolConfig.cmake b/infra/nnfw/cmake/packages/PthreadpoolConfig.cmake
index 6283826f6e4..ebf73d6578a 100644
--- a/infra/nnfw/cmake/packages/PthreadpoolConfig.cmake
+++ b/infra/nnfw/cmake/packages/PthreadpoolConfig.cmake
@@ -22,8 +22,6 @@ function(_Pthreadpool_Build)
     add_extdirectory("${PthreadpoolSource_DIR}" PTHREADPOOL EXCLUDE_FROM_ALL)
 
     set_target_properties(pthreadpool PROPERTIES POSITION_INDEPENDENT_CODE ON)
-    # Suppress warnings generated by pthreadpool
-    set_target_properties(pthreadpool PROPERTIES COMPILE_FLAGS "-Wno-deprecated-declarations")
 
     set(PthreadpoolSource_DIR ${PthreadpoolSource_DIR} PARENT_SCOPE)
     set(Pthreadpool_FOUND TRUE PARENT_SCOPE)
 endfunction(_Pthreadpool_Build)
diff --git a/packaging/nnfw.spec b/packaging/nnfw.spec
index 480f4f3d49c..f60d1b3fa12 100644
--- a/packaging/nnfw.spec
+++ b/packaging/nnfw.spec
@@ -148,7 +148,7 @@ If you want to get coverage info, you should install runtime package which is bu
 %endif # test_build
 
 # Set option for configuration
-%define option_config %{nil}
+%define option_config -DENABLE_STRICT_BUILD=ON
 %if %{config_support} == 1
 %endif # config_support
diff --git a/runtime/3rdparty/CMakeLists.txt b/runtime/3rdparty/CMakeLists.txt
index 99d2028f460..ee77ffab83b 100644
--- a/runtime/3rdparty/CMakeLists.txt
+++ b/runtime/3rdparty/CMakeLists.txt
@@ -1,3 +1,8 @@
+# Disable warning messages from external source code
+if(DISABLE_EXTERNAL_WARNING)
+  add_compile_options(-w)
+endif(DISABLE_EXTERNAL_WARNING)
+
 # Add all subdirectories.
 # Each library in sub-directory must have it's own CMakeLists.txt
 # to build library's binaries or to support interface.
diff --git a/runtime/libs/misc/CMakeLists.txt b/runtime/libs/misc/CMakeLists.txt
index 3e02adbc3fb..d297090022b 100644
--- a/runtime/libs/misc/CMakeLists.txt
+++ b/runtime/libs/misc/CMakeLists.txt
@@ -6,7 +6,6 @@ list(REMOVE_ITEM SOURCES ${TESTS})
 add_library(nnfw_lib_misc STATIC ${SOURCES})
 target_include_directories(nnfw_lib_misc PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include)
 set_target_properties(nnfw_lib_misc PROPERTIES POSITION_INDEPENDENT_CODE ON)
-target_link_libraries(nnfw_lib_misc PRIVATE nnfw_common)
 target_link_libraries(nnfw_lib_misc PRIVATE nnfw_coverage)
 
 if(NOT ENABLE_TEST)
diff --git a/runtime/libs/misc/src/tensor/IndexIterator.test.cpp b/runtime/libs/misc/src/tensor/IndexIterator.test.cpp
index 875786bddd5..894fd7e46c7 100644
--- a/runtime/libs/misc/src/tensor/IndexIterator.test.cpp
+++ b/runtime/libs/misc/src/tensor/IndexIterator.test.cpp
@@ -56,6 +56,6 @@ TEST(MiscIndexIteratorTest, neg_zero_rank_shape)
   // It is expected not to throw any exception, do nothing
   const Shape shape{};
 
-  ASSERT_NO_THROW(iterate(shape) << ([](const Index &index) {}));
+  ASSERT_NO_THROW(iterate(shape) << ([](const Index &) {}));
   SUCCEED();
 }
diff --git a/runtime/libs/ndarray/CMakeLists.txt b/runtime/libs/ndarray/CMakeLists.txt
index 8d0ba0487cd..dd2bab80a76 100644
--- a/runtime/libs/ndarray/CMakeLists.txt
+++ b/runtime/libs/ndarray/CMakeLists.txt
@@ -10,7 +10,6 @@ if(${NDARRAY_INLINE_TEMPLATES})
   target_compile_definitions(ndarray PUBLIC -DNDARRAY_INLINE_TEMPLATES=1)
 endif()
 
-target_link_libraries(ndarray PRIVATE nnfw_common)
 target_link_libraries(ndarray PRIVATE nnfw_coverage)
 
 if(NOT ENABLE_TEST)
diff --git a/runtime/libs/ndarray/example/example_no_array.cpp b/runtime/libs/ndarray/example/example_no_array.cpp
index 3a4d05dca83..8a2408c8251 100644
--- a/runtime/libs/ndarray/example/example_no_array.cpp
+++ b/runtime/libs/ndarray/example/example_no_array.cpp
@@ -19,6 +19,7 @@
 #include
 #include
 #include
+#include
 
 void gather_no_array(const float *in_data, const std::array &dims, float *out_data,
                      const std::array &out_dims, //[nselections,
@@ -26,9 +27,9 @@ void gather_no_array(const float *in_data, const std::array &dims, fl
 {
   assert(indices_dims[1] == dims.size());
 
-  for (int i = 0; i < indices_dims[0]; ++i)
+  for (uint32_t i = 0; i < indices_dims[0]; ++i)
   {
-    for (int j = 0; j < indices_dims[1]; ++j)
+    for (uint32_t j = 0; j < indices_dims[1]; ++j)
     {
       const int *index_ptr = indices + i * indices_dims[2] * indices_dims[1] + j * indices_dims[2];
@@ -40,7 +41,7 @@ void gather_no_array(const float *in_data, const std::array &dims, fl
 
       float *out_ptr = out_data + out_offset;
 
-      for (int k = 0; k < dims[2]; ++k)
+      for (uint32_t k = 0; k < dims[2]; ++k)
       {
         out_ptr[k] = in_ptr[k];
       }
diff --git a/runtime/libs/profiling/CMakeLists.txt b/runtime/libs/profiling/CMakeLists.txt
index b115cc1c655..7169508a160 100644
--- a/runtime/libs/profiling/CMakeLists.txt
+++ b/runtime/libs/profiling/CMakeLists.txt
@@ -3,4 +3,3 @@ file(GLOB_RECURSE SOURCES "src/*.cpp")
 add_library(nnfw_lib_profiling STATIC ${SOURCES})
 set_property(TARGET nnfw_lib_profiling PROPERTY POSITION_INDEPENDENT_CODE ON)
 target_include_directories(nnfw_lib_profiling PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include)
-target_link_libraries(nnfw_lib_profiling PRIVATE nnfw_common)
diff --git a/runtime/onert/api/nnfw/CMakeLists.txt b/runtime/onert/api/nnfw/CMakeLists.txt
index 1f864e5612f..8c71c0da797 100644
--- a/runtime/onert/api/nnfw/CMakeLists.txt
+++ b/runtime/onert/api/nnfw/CMakeLists.txt
@@ -11,7 +11,6 @@ set(NNFW_API_HEADERS include/nnfw.h include/nnfw_experimental.h)
 target_link_libraries(${ONERT_DEV} PRIVATE onert_core)
 target_link_libraries(${ONERT_DEV} PRIVATE nnfw_lib_misc)
 target_link_libraries(${ONERT_DEV} PRIVATE jsoncpp ${LIB_PTHREAD})
-target_link_libraries(${ONERT_DEV} PRIVATE nnfw_common)
 target_link_libraries(${ONERT_DEV} PRIVATE nnfw_coverage)
 target_link_libraries(${ONERT_DEV} PRIVATE circle_schema)
 # NOTE Below line is added to remove warning for android build
diff --git a/runtime/onert/api/python/src/nnfw_api_wrapper.cc b/runtime/onert/api/python/src/nnfw_api_wrapper.cc
index 64e4fc5cfc8..513311fd367 100644
--- a/runtime/onert/api/python/src/nnfw_api_wrapper.cc
+++ b/runtime/onert/api/python/src/nnfw_api_wrapper.cc
@@ -39,6 +39,9 @@ void ensure_status(NNFW_STATUS status)
     case NNFW_STATUS::NNFW_STATUS_INSUFFICIENT_OUTPUT_SIZE:
       std::cout << "[ERROR]\tNNFW_STATUS_INSUFFICIENT_OUTPUT_SIZE\n";
       exit(1);
+    case NNFW_STATUS::NNFW_STATUS_DEPRECATED_API:
+      std::cout << "[ERROR]\tNNFW_STATUS_DEPRECATED_API\n";
+      exit(1);
   }
 }
 
@@ -129,7 +132,7 @@ const char *getStringType(NNFW_TYPE type)
 uint64_t num_elems(const nnfw_tensorinfo *tensor_info)
 {
   uint64_t n = 1;
-  for (uint32_t i = 0; i < tensor_info->rank; ++i)
+  for (int32_t i = 0; i < tensor_info->rank; ++i)
   {
     n *= tensor_info->dims[i];
   }
diff --git a/runtime/onert/backend/acl_cl/CMakeLists.txt b/runtime/onert/backend/acl_cl/CMakeLists.txt
index 4c9ad3c38c5..5daf5da2cd3 100644
--- a/runtime/onert/backend/acl_cl/CMakeLists.txt
+++ b/runtime/onert/backend/acl_cl/CMakeLists.txt
@@ -11,7 +11,6 @@ file(GLOB_RECURSE SOURCES "*.cc")
 
 add_library(${LIB_ONERT_BACKEND_ACL_CL} SHARED ${SOURCES})
 
 target_link_libraries(${LIB_ONERT_BACKEND_ACL_CL} PRIVATE ${LIB_ONERT_BACKEND_ACL_COMMON})
-target_link_libraries(${LIB_ONERT_BACKEND_ACL_CL} PRIVATE nnfw_common)
 target_link_libraries(${LIB_ONERT_BACKEND_ACL_CL} PRIVATE nnfw_coverage)
 
 # Set install rpath to find onert_core and acl library
diff --git a/runtime/onert/backend/acl_common/CMakeLists.txt b/runtime/onert/backend/acl_common/CMakeLists.txt
index 8d409a47c91..c7bfe771e0d 100644
--- a/runtime/onert/backend/acl_common/CMakeLists.txt
+++ b/runtime/onert/backend/acl_common/CMakeLists.txt
@@ -13,7 +13,6 @@ target_link_libraries(${LIB_ONERT_BACKEND_ACL_COMMON} PUBLIC onert_core)
 target_link_libraries(${LIB_ONERT_BACKEND_ACL_COMMON} PUBLIC arm_compute arm_compute_ex)
 target_link_libraries(${LIB_ONERT_BACKEND_ACL_COMMON} PUBLIC nnfw_lib_misc)
 target_link_libraries(${LIB_ONERT_BACKEND_ACL_COMMON} PUBLIC ${LIB_ONERT_BACKEND_CL_COMMON})
-target_link_libraries(${LIB_ONERT_BACKEND_ACL_COMMON} PRIVATE nnfw_common)
 target_link_libraries(${LIB_ONERT_BACKEND_ACL_COMMON} PRIVATE nnfw_coverage)
 
 set_target_properties(${LIB_ONERT_BACKEND_ACL_COMMON} PROPERTIES POSITION_INDEPENDENT_CODE ON)
diff --git a/runtime/onert/backend/acl_neon/CMakeLists.txt b/runtime/onert/backend/acl_neon/CMakeLists.txt
index 8b6a7894b98..85fe50d5ba8 100644
--- a/runtime/onert/backend/acl_neon/CMakeLists.txt
+++ b/runtime/onert/backend/acl_neon/CMakeLists.txt
@@ -11,7 +11,6 @@ file(GLOB_RECURSE SOURCES "*.cc")
 
 add_library(${LIB_ONERT_BACKEND_ACL_NEON} SHARED ${SOURCES})
 
 target_link_libraries(${LIB_ONERT_BACKEND_ACL_NEON} PRIVATE ${LIB_ONERT_BACKEND_ACL_COMMON})
-target_link_libraries(${LIB_ONERT_BACKEND_ACL_NEON} PRIVATE nnfw_common)
 target_link_libraries(${LIB_ONERT_BACKEND_ACL_NEON} PRIVATE nnfw_coverage)
 
 # Set install rpath to find onert_core and acl library
diff --git a/runtime/onert/backend/cpu/CMakeLists.txt b/runtime/onert/backend/cpu/CMakeLists.txt
index b55894d7e34..bb076a223d9 100644
--- a/runtime/onert/backend/cpu/CMakeLists.txt
+++ b/runtime/onert/backend/cpu/CMakeLists.txt
@@ -9,7 +9,6 @@ add_library(${LIB_ONERT_BACKEND_CPU} SHARED ${SOURCES})
 target_include_directories(${LIB_ONERT_BACKEND_CPU} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
 target_link_libraries(${LIB_ONERT_BACKEND_CPU} PRIVATE nnfw_lib_cker nnfw_lib_misc)
 target_link_libraries(${LIB_ONERT_BACKEND_CPU} PRIVATE onert_core)
-target_link_libraries(${LIB_ONERT_BACKEND_CPU} PRIVATE nnfw_common)
 target_link_libraries(${LIB_ONERT_BACKEND_CPU} PRIVATE nnfw_coverage)
 target_link_libraries(${LIB_ONERT_BACKEND_CPU} PRIVATE ruy)
 target_link_libraries(${LIB_ONERT_BACKEND_CPU} INTERFACE ruy_instrumentation)
diff --git a/runtime/onert/backend/ruy/CMakeLists.txt b/runtime/onert/backend/ruy/CMakeLists.txt
index efc44c85b0f..3120eab37d1 100644
--- a/runtime/onert/backend/ruy/CMakeLists.txt
+++ b/runtime/onert/backend/ruy/CMakeLists.txt
@@ -8,7 +8,6 @@ add_library(${LIB_ONERT_BACKEND_RUY} SHARED ${SOURCES})
 
 target_link_libraries(${LIB_ONERT_BACKEND_RUY} PRIVATE nnfw_lib_ruy)
 target_link_libraries(${LIB_ONERT_BACKEND_RUY} PRIVATE onert_core)
-target_link_libraries(${LIB_ONERT_BACKEND_RUY} PRIVATE nnfw_common)
 target_link_libraries(${LIB_ONERT_BACKEND_RUY} PRIVATE nnfw_coverage)
 target_link_libraries(${LIB_ONERT_BACKEND_RUY} PRIVATE ruy)
 
diff --git a/runtime/onert/backend/train/CMakeLists.txt b/runtime/onert/backend/train/CMakeLists.txt
index 9d4235073dd..2a74d69939d 100644
--- a/runtime/onert/backend/train/CMakeLists.txt
+++ b/runtime/onert/backend/train/CMakeLists.txt
@@ -9,7 +9,6 @@ add_library(${LIB_ONERT_BACKEND_TRAIN} SHARED ${SOURCES})
 target_link_libraries(${LIB_ONERT_BACKEND_TRAIN} PRIVATE ${LIB_ONERT_BACKEND_CPU})
 target_link_libraries(${LIB_ONERT_BACKEND_TRAIN} PRIVATE onert_core)
 target_link_libraries(${LIB_ONERT_BACKEND_TRAIN} PRIVATE nnfw_lib_cker nnfw_lib_misc)
-target_link_libraries(${LIB_ONERT_BACKEND_TRAIN} PRIVATE nnfw_common)
 target_link_libraries(${LIB_ONERT_BACKEND_TRAIN} PRIVATE nnfw_coverage)
 
 set_target_properties(${LIB_ONERT_BACKEND_TRAIN} PROPERTIES
diff --git a/runtime/onert/backend/train/MemoryPlanner.test.cc b/runtime/onert/backend/train/MemoryPlanner.test.cc
index 7a908b5df87..55320b49868 100644
--- a/runtime/onert/backend/train/MemoryPlanner.test.cc
+++ b/runtime/onert/backend/train/MemoryPlanner.test.cc
@@ -181,7 +181,7 @@ TEST(FirstFitPlanner, neg_disposable_release_non_existing_index)
 {
   PlannerVerifier p;
 
-  auto on_only_debug_mode = [&p]() {
+  [[maybe_unused]] auto on_only_debug_mode = [&p]() {
     EXPECT_DEATH({ p.release(0, 1); },
                  "Cannot release for given index. It has been not claimed or released already.");
     return true;
@@ -206,7 +206,7 @@ TEST(FirstFitPlanner, neg_disposable_release_twice)
 {
   PlannerVerifier p;
 
-  auto on_only_debug_mode = [&p]() {
+  [[maybe_unused]] auto on_only_debug_mode = [&p]() {
     EXPECT_EXIT({ p.release(0, 0); }, ::testing::KilledBySignal(SIGABRT),
                 "Cannot release for given index. It has been not claimed or released already.");
     return true;
@@ -368,7 +368,7 @@ TEST(FirstFitPlanner, neg_layerscope_release_non_existing_index)
 {
   PlannerVerifier p;
 
-  auto on_only_debug_mode = [&p]() {
+  [[maybe_unused]] auto on_only_debug_mode = [&p]() {
     EXPECT_DEATH({ p.release(0, 1); },
                  "Cannot release for given index. It has been not claimed or released already.");
     return true;
@@ -393,7 +393,7 @@ TEST(FirstFitPlanner, neg_layerscope_release_twice)
 {
   PlannerVerifier p;
 
-  auto on_only_debug_mode = [&p]() {
+  [[maybe_unused]] auto on_only_debug_mode = [&p]() {
     EXPECT_EXIT({ p.release(0, 0); }, ::testing::KilledBySignal(SIGABRT),
                 "Cannot release for given index. It has been not claimed or released already.");
     return true;
diff --git a/runtime/onert/backend/trix/CMakeLists.txt b/runtime/onert/backend/trix/CMakeLists.txt
index cfbf6f0fbeb..affc4827e79 100644
--- a/runtime/onert/backend/trix/CMakeLists.txt
+++ b/runtime/onert/backend/trix/CMakeLists.txt
@@ -13,7 +13,6 @@ add_library(${LIB_ONERT_BACKEND_TRIX} SHARED ${SOURCES})
 
 target_link_libraries(${LIB_ONERT_BACKEND_TRIX} PRIVATE onert_core)
 target_link_libraries(${LIB_ONERT_BACKEND_TRIX} PRIVATE trix_engine)
-target_link_libraries(${LIB_ONERT_BACKEND_TRIX} PRIVATE nnfw_common)
 target_link_libraries(${LIB_ONERT_BACKEND_TRIX} PRIVATE nnfw_coverage)
 
 set_target_properties(${LIB_ONERT_BACKEND_TRIX} PROPERTIES
diff --git a/runtime/onert/backend/xnnpack/CMakeLists.txt b/runtime/onert/backend/xnnpack/CMakeLists.txt
index 874539c5656..f7a95f5e0a2 100644
--- a/runtime/onert/backend/xnnpack/CMakeLists.txt
+++ b/runtime/onert/backend/xnnpack/CMakeLists.txt
@@ -11,7 +11,6 @@ file(GLOB_RECURSE SOURCES "*.cc")
 add_library(${LIB_ONERT_BACKEND_XNNPACK} SHARED ${SOURCES})
 
 target_link_libraries(${LIB_ONERT_BACKEND_XNNPACK} PRIVATE onert_core)
-target_link_libraries(${LIB_ONERT_BACKEND_XNNPACK} PRIVATE nnfw_common)
 target_link_libraries(${LIB_ONERT_BACKEND_XNNPACK} PRIVATE nnfw_coverage)
 target_link_libraries(${LIB_ONERT_BACKEND_XNNPACK} PRIVATE pthreadpool)
 target_link_libraries(${LIB_ONERT_BACKEND_XNNPACK} PRIVATE XNNPACK)
diff --git a/runtime/onert/core/CMakeLists.txt b/runtime/onert/core/CMakeLists.txt
index 87ac93e86fb..61ef0970190 100644
--- a/runtime/onert/core/CMakeLists.txt
+++ b/runtime/onert/core/CMakeLists.txt
@@ -18,7 +18,6 @@ target_link_libraries(onert_core PRIVATE circle_schema)
 target_link_libraries(onert_core PRIVATE jsoncpp half)
 target_link_libraries(onert_core PRIVATE nnfw_lib_misc nnfw_lib_cker)
-target_link_libraries(onert_core PRIVATE nnfw_common)
 target_link_libraries(onert_core PRIVATE nnfw_coverage)
 target_link_libraries(onert_core PRIVATE dl ${LIB_PTHREAD})
 
diff --git a/runtime/onert/core/src/compiler/HEScheduler.test.cc b/runtime/onert/core/src/compiler/HEScheduler.test.cc
index 75a21fa724b..e39e2059cf9 100644
--- a/runtime/onert/core/src/compiler/HEScheduler.test.cc
+++ b/runtime/onert/core/src/compiler/HEScheduler.test.cc
@@ -123,7 +123,8 @@ void setExecutor(const std::string &executor) { setenv("EXECUTOR", executor.c_st
 void setProfilingMode(const bool value) { setenv("PROFILING_MODE", value ? "1" : "0", true); }
 
 // Calculate operation size by addition sizes of all input and output operands
-uint32_t calcOpSize(const std::shared_ptr<Graph> &graph, const OperationIndex &op_idx)
+[[maybe_unused]] uint32_t calcOpSize(const std::shared_ptr<Graph> &graph,
+                                     const OperationIndex &op_idx)
 {
   uint32_t size = 0;
   const auto &op = graph->operations().at(op_idx);
@@ -152,7 +153,7 @@ void setOperationsExecutionTime(const std::vector<const Backend *> &backends,
 {
   assert(op_names.size() == op_sizes.size());
   ExecTime et(backends);
-  for (int i = 0; i < op_names.size(); ++i)
+  for (uint32_t i = 0; i < op_names.size(); ++i)
   {
     for (const auto backend : backends)
       setOperationExecTime(et, backend, op_names[i], false, op_sizes[i], exec_time);
@@ -273,13 +274,13 @@ std::shared_ptr<Graph> createBranchedGraph()
   // Create fc1 node
   auto fc1_const_idx = graph->addOperand(ir::Shape{OPERAND_ELEMS}, float_op);
   auto fc1_out_idx = graph->addOperand(ir::Shape{OPERAND_ELEMS}, float_op);
-  FullyConnected::Param fc1_op_params{Activation::NONE};
+  FullyConnected::Param fc1_op_params{Activation::NONE, FullyConnectedWeightsFormat::Default};
   create(graph, OIS{add_out_idx, fc1_const_idx}, OIS{fc1_out_idx}, fc1_op_params);
 
   // Create fc2 node
   auto fc2_const_idx = graph->addOperand(ir::Shape{OPERAND_ELEMS}, float_op);
   auto fc2_out_idx = graph->addOperand(ir::Shape{OPERAND_ELEMS}, float_op);
-  FullyConnected::Param fc2_op_params{Activation::NONE};
+  FullyConnected::Param fc2_op_params{Activation::NONE, FullyConnectedWeightsFormat::Default};
   create(graph, OIS{fc1_out_idx, fc2_const_idx}, OIS{fc2_out_idx}, fc2_op_params);
 
   // Create sub node
diff --git a/runtime/onert/core/src/exec/IPermuteFunction.test.cc b/runtime/onert/core/src/exec/IPermuteFunction.test.cc
index 586e2305708..897741acab7 100644
--- a/runtime/onert/core/src/exec/IPermuteFunction.test.cc
+++ b/runtime/onert/core/src/exec/IPermuteFunction.test.cc
@@ -34,7 +34,7 @@ class MockUpTensor : public ITensor
 {
 public:
   MockUpTensor(const Shape &shape, const TypeInfo &type_info, Layout layout, size_t pad)
-    : _shape(shape), _type_info(type_info), _data(nullptr), _layout(layout)
+    : _shape(shape), _type_info(type_info), _layout(layout), _data(nullptr)
   {
     _strides.resize(shape.rank());
 
@@ -61,7 +61,7 @@ class MockUpTensor
   size_t calcOffset(const ir::Coordinates &coords) const override
   {
     size_t offset = 0;
-    for (size_t i = 0; i < _shape.rank(); ++i)
+    for (int i = 0; i < _shape.rank(); ++i)
     {
       offset += (_strides[i] * coords[i]);
     }
@@ -547,7 +547,7 @@ TEST(IPermuteFunction, qasymm8_to_float)
   int32_t min_val = std::numeric_limits<uint8_t>::min();
   int32_t max_val = std::numeric_limits<uint8_t>::max();
-  for (int32_t i = 0; i < sizeof(expected_buffer) / sizeof(float); ++i)
+  for (uint32_t i = 0; i < sizeof(expected_buffer) / sizeof(float); ++i)
   {
     int32_t unclamped = static_cast<int32_t>(std::round(expected_buffer[i] / scale)) + zero_point;
     input_buffer[i] = std::min(std::max(unclamped, min_val), max_val);
@@ -607,7 +607,7 @@ TEST(IPermuteFunction, qsymm8_to_float)
   int32_t min_val = std::numeric_limits<int8_t>::min();
   int32_t max_val = std::numeric_limits<int8_t>::max();
-  for (int32_t i = 0; i < sizeof(expected_buffer) / sizeof(float); ++i)
+  for (uint32_t i = 0; i < sizeof(expected_buffer) / sizeof(float); ++i)
   {
     int32_t unclamped = static_cast<int32_t>(std::round(expected_buffer[i] / scale)) + zero_point;
     input_buffer[i] = std::min(std::max(unclamped, min_val), max_val);
@@ -667,7 +667,7 @@ TEST(IPermuteFunction, qsymm16_to_float)
   int32_t min_val = std::numeric_limits<int16_t>::min();
   int32_t max_val = std::numeric_limits<int16_t>::max();
-  for (int32_t i = 0; i < sizeof(expected_buffer) / sizeof(float); ++i)
+  for (uint32_t i = 0; i < sizeof(expected_buffer) / sizeof(float); ++i)
   {
     int32_t unclamped = static_cast<int32_t>(std::round(expected_buffer[i] / scale)) + zero_point;
     input_buffer[i] = std::min(std::max(unclamped, min_val), max_val);
@@ -816,7 +816,7 @@ TEST(IPermuteFunction, float_qasymm8_layout)
       int32_t min_val = std::numeric_limits<uint8_t>::min();
       int32_t max_val = std::numeric_limits<uint8_t>::max();
-      for (int32_t i = 0; i < sizeof(expected_buffer) / sizeof(float); ++i)
+      for (uint32_t i = 0; i < sizeof(expected_buffer) / sizeof(float); ++i)
       {
         int32_t unclamped = static_cast<int32_t>(std::round(expected_buffer[i] / scale)) + zero_point;
         input_buffer[i] = std::min(std::max(unclamped, min_val), max_val);
@@ -836,7 +836,7 @@ TEST(IPermuteFunction, float_qasymm8_layout)
       }
       TypeInfo type_info{DataType::QUANT_UINT8_ASYMM, scale, zero_point};
       inputs[i] = std::make_unique(shape, type_info, layout, input_pads[i]);
-      inputs[i]->setBuffer(reinterpret_cast<uint8_t *>(expected_buffer));
+      inputs[i]->setBuffer(reinterpret_cast<uint8_t *>(input_buffer));
 
       if (layout == Layout::NHWC)
       {
diff --git a/runtime/onert/core/src/exec/feature/MockTensor.test.h b/runtime/onert/core/src/exec/feature/MockTensor.test.h
index bdddad99aae..b936ae3146f 100644
--- a/runtime/onert/core/src/exec/feature/MockTensor.test.h
+++ b/runtime/onert/core/src/exec/feature/MockTensor.test.h
@@ -54,7 +54,7 @@ template <typename T> class MockTensor : public onert::backend::ITensor
   const std::vector<float> &data_scales() const override { return _dummy_scales; }
   const std::vector<int32_t> &data_zero_points() const override { return _dummy_zerops; }
   bool has_padding() const override { return false; }
-  void access(const std::function<void(ITensor &tensor)> &fn) override {}
+  void access(const std::function<void(ITensor &tensor)> &) override {}
   bool is_dynamic() const override { return false; }
 
 private:
diff --git a/runtime/onert/core/src/ir/train/TrainableGraph.test.cc b/runtime/onert/core/src/ir/train/TrainableGraph.test.cc
index 7b755dc9d18..a97d1933d9c 100644
--- a/runtime/onert/core/src/ir/train/TrainableGraph.test.cc
+++ b/runtime/onert/core/src/ir/train/TrainableGraph.test.cc
@@ -224,9 +224,9 @@ TEST(TrainableGraph, truncating_backward_topological_order_nonlinear)
   tgraph.addInput({y_true});
   tgraph.addOutput({output});
 
-  auto ea1 = addElementwiseActivationOperation(tgraph, {input}, {u});
+  addElementwiseActivationOperation(tgraph, {input}, {u});
   auto fc1 = addFullyConnectedOperation(tgraph, {u, weight1, bias1}, {v});
-  auto ea2 = addElementwiseActivationOperation(tgraph, {input}, {w});
+  addElementwiseActivationOperation(tgraph, {input}, {w});
   auto fc2 = addFullyConnectedOperation(tgraph, {w, weight2, bias2}, {x});
   auto add = addAddOperation(tgraph, {v, x}, {y_pred});
   auto loss = addLossOperation(tgraph, {y_pred, y_true}, {output});
@@ -291,10 +291,10 @@ TEST(TrainableGraph, truncating_backward_topological_order_nonlinear)
 
   auto fc3 = addFullyConnectedOperation(tgraph, {input1, weight3}, {r});
   auto add1 = addAddOperation(tgraph, {r, input}, {s});
-  auto add2 = addAddOperation(tgraph, {input, input2}, {t});
+  addAddOperation(tgraph, {input, input2}, {t});
   auto ea1 = addElementwiseActivationOperation(tgraph, {s}, {u});
   auto fc1 = addFullyConnectedOperation(tgraph, {u, weight1, bias1}, {v});
-  auto ea2 = addElementwiseActivationOperation(tgraph, {t}, {w});
+  addElementwiseActivationOperation(tgraph, {t}, {w});
   auto fc2 = addFullyConnectedOperation(tgraph, {w, weight2, bias2}, {x});
   auto add = addAddOperation(tgraph, {v, x}, {y_pred});
   auto loss = addLossOperation(tgraph, {y_pred, y_true}, {output});
@@ -363,10 +363,10 @@ TEST(TrainableGraph, essential_backward_topological_order_nonlinear)
 
   auto fc3 = addFullyConnectedOperation(tgraph, {input1, weight3}, {r});
   auto add1 = addAddOperation(tgraph, {r, input}, {s});
-  auto add2 = addAddOperation(tgraph, {input, input2}, {t});
+  addAddOperation(tgraph, {input, input2}, {t});
   auto ea1 = addElementwiseActivationOperation(tgraph, {s}, {u});
   auto fc1 = addFullyConnectedOperation(tgraph, {u, weight1, bias1}, {v});
-  auto ea2 = addElementwiseActivationOperation(tgraph, {t}, {w});
+  addElementwiseActivationOperation(tgraph, {t}, {w});
   auto fc2 = addFullyConnectedOperation(tgraph, {w, weight2, bias2}, {x});
   auto add = addAddOperation(tgraph, {v, x}, {y_pred});
   auto loss = addLossOperation(tgraph, {y_pred, y_true}, {output});
diff --git a/runtime/onert/core/src/ir/train/UseDefGenerator.test.cc b/runtime/onert/core/src/ir/train/UseDefGenerator.test.cc
index 86bfcc3e824..e12601de2ca 100644
--- a/runtime/onert/core/src/ir/train/UseDefGenerator.test.cc
+++ b/runtime/onert/core/src/ir/train/UseDefGenerator.test.cc
@@ -610,7 +610,6 @@ TEST(UseDefGenerator, one_op)
     operation::ElementwiseActivation::Param param;
     param.op_type = operation::ElementwiseActivation::Type::RELU;
     param.alpha = std::numeric_limits<float>::infinity();
-    param.beta == 0.f;
 
     const auto ea_op = operation::ElementwiseActivation({ea_input}, {y_pred}, param);
     const auto ea_index = tgraph.addOperation(std::make_unique(ea_op));
@@ -1824,7 +1823,6 @@ TEST(UseDefGenerator, one_op)
     operation::ElementwiseActivation::Param ea_param;
     ea_param.op_type = operation::ElementwiseActivation::Type::RELU;
     ea_param.alpha = std::numeric_limits<float>::infinity();
-    ea_param.beta == 0.f;
 
     const auto ea_op = operation::ElementwiseActivation({fc_out}, {ea_out}, ea_param);
     const auto ea_index = tgraph.addOperation(std::make_unique(ea_op));
diff --git a/runtime/onert/core/src/ir/verifier/Verifier.test.cc b/runtime/onert/core/src/ir/verifier/Verifier.test.cc
index 1ec71cd55b7..22de6819a8e 100644
--- a/runtime/onert/core/src/ir/verifier/Verifier.test.cc
+++ b/runtime/onert/core/src/ir/verifier/Verifier.test.cc
@@ -84,7 +84,7 @@ TEST(Verifier, neg_edge_consistency_checker_2)
 
   auto mock_op = std::make_unique(IndexSet{operand1}, IndexSet{operand2});
   auto mock_op_ptr = mock_op.get();
-  auto op_ind = graph.addOperation(std::move(mock_op));
+  graph.addOperation(std::move(mock_op));
 
   mock_op_ptr->setInputs({operand2}); // Manipulate the operation alone
diff --git a/runtime/onert/core/src/odc/QuantizerLoader.test.cc b/runtime/onert/core/src/odc/QuantizerLoader.test.cc
index 112e65b2762..e7d02a04953 100644
--- a/runtime/onert/core/src/odc/QuantizerLoader.test.cc
+++ b/runtime/onert/core/src/odc/QuantizerLoader.test.cc
@@ -56,7 +56,9 @@ TEST(odc_QuantizerLoader, neg_unload)
 {
   QuantizerLoader &loader = QuantizerLoader::instance();
   if (loader.loadLibrary() == 0)
+  {
     ASSERT_NE(loader.get(), nullptr);
+  }
 
   ASSERT_EQ(loader.unloadLibrary(), 0);
   ASSERT_EQ(loader.get(), nullptr);
diff --git a/runtime/onert/core/src/util/ObjectManager.test.cc b/runtime/onert/core/src/util/ObjectManager.test.cc
index 3fe73573219..0cd8499563a 100644
--- a/runtime/onert/core/src/util/ObjectManager.test.cc
+++ b/runtime/onert/core/src/util/ObjectManager.test.cc
@@ -151,12 +151,12 @@ TEST(ObjectManager, const_iterate)
 {
   util::ObjectManager<Index, int> man;
 
-  auto index0 = man.emplace(100);
-  auto index1 = man.emplace(200);
-  auto index2 = man.emplace(300);
+  man.emplace(100);
+  man.emplace(200);
+  man.emplace(300);
 
   int sum = 0;
-  man.iterate([&](const Index &index, const int &val) { sum += val; });
+  man.iterate([&](const Index &, const int &val) { sum += val; });
   ASSERT_EQ(sum, 600);
 }
 
@@ -168,7 +168,7 @@ TEST(ObjectManager, non_const_iterate)
   auto index1 = man.emplace(200);
   auto index2 = man.emplace(300);
 
-  man.iterate([&](const Index &index, int &val) { val += 1; });
+  man.iterate([&](const Index &, int &val) { val += 1; });
   ASSERT_EQ(man.at(index0), 101);
   ASSERT_EQ(man.at(index1), 201);
   ASSERT_EQ(man.at(index2), 301);
diff --git a/runtime/onert/loader/trix/CMakeLists.txt b/runtime/onert/loader/trix/CMakeLists.txt
index 4d4f505f874..77ad2128b7f 100644
--- a/runtime/onert/loader/trix/CMakeLists.txt
+++ b/runtime/onert/loader/trix/CMakeLists.txt
@@ -17,7 +17,7 @@ set_target_properties(tvn_loader PROPERTIES
   POSITION_INDEPENDENT_CODE ON
   INSTALL_RPATH "$ORIGIN:$ORIGIN/..")
 target_link_libraries(tvn_loader PRIVATE onert_core)
-target_link_libraries(tvn_loader PRIVATE nnfw_common nnfw_coverage)
+target_link_libraries(tvn_loader PRIVATE nnfw_coverage)
 target_link_libraries(tvn_loader PRIVATE trix_engine)
 
 install(TARGETS tvn_loader DESTINATION lib/nnfw/loader)
diff --git a/runtime/onert/odc/CMakeLists.txt b/runtime/onert/odc/CMakeLists.txt
index 681077f469a..36ebbbe05bc 100644
--- a/runtime/onert/odc/CMakeLists.txt
+++ b/runtime/onert/odc/CMakeLists.txt
@@ -10,7 +10,6 @@ list(REMOVE_ITEM SOURCES ${TESTS})
 
 add_library(onert_odc SHARED ${SOURCES})
 target_link_libraries(onert_odc PRIVATE onert_core luci::import luci::export luci::pass luci::loco)
-target_link_libraries(onert_odc PRIVATE nnfw_common)
 target_link_libraries(onert_odc PRIVATE nnfw_coverage)
 
 install(TARGETS onert_odc LIBRARY DESTINATION lib/nnfw/odc)
diff --git a/tests/custom_op/FillFrom/FillFrom_runner.cc b/tests/custom_op/FillFrom/FillFrom_runner.cc
index 6b09d5db687..8109e5730ad 100644
--- a/tests/custom_op/FillFrom/FillFrom_runner.cc
+++ b/tests/custom_op/FillFrom/FillFrom_runner.cc
@@ -61,7 +61,7 @@ uint64_t NowMicros()
 uint64_t num_elems(const nnfw_tensorinfo *ti)
 {
   uint64_t n = 1;
-  for (uint32_t i = 0; i < ti->rank; ++i)
+  for (int32_t i = 0; i < ti->rank; ++i)
   {
     assert(ti->dims[i] >= 0);
     n *= ti->dims[i];
@@ -190,7 +190,6 @@ int main(const int argc, char **argv)
 
   auto generateInputs = [session, num_inputs, &inputs]() {
     // generate random data
-    const int seed = 1;
     for (uint32_t i = 0; i < num_inputs; ++i)
     {
       nnfw_tensorinfo ti;
diff --git a/tests/libs/benchmark/src/MemoryInfo.cpp b/tests/libs/benchmark/src/MemoryInfo.cpp
index 20d262961b7..82ffda22a22 100644
--- a/tests/libs/benchmark/src/MemoryInfo.cpp
+++ b/tests/libs/benchmark/src/MemoryInfo.cpp
@@ -31,7 +31,7 @@ const std::string proc_status_path("/proc/self/status");
 const std::string gpu_memory_path("/sys/kernel/debug/mali0/gpu_memory");
 const std::string proc_smaps_path("/proc/self/smaps");
 
-bool isStrNumber(const std::string &s)
+[[maybe_unused]] bool isStrNumber(const std::string &s)
 {
   return !s.empty() &&
          std::find_if(s.begin(), s.end(), [](char c) { return !std::isdigit(c); }) == s.end();
diff --git a/tests/libs/benchmark/src/MemoryPoller.cpp b/tests/libs/benchmark/src/MemoryPoller.cpp
index 62339306ef3..43f6221351a 100644
--- a/tests/libs/benchmark/src/MemoryPoller.cpp
+++ b/tests/libs/benchmark/src/MemoryPoller.cpp
@@ -27,7 +27,7 @@ namespace benchmark
 {
 
 MemoryPoller::MemoryPoller(std::chrono::milliseconds duration, bool gpu_poll)
-  : _duration(duration), _run(false), _term(false), _gpu_poll(gpu_poll)
+  : _duration(duration), _term(false), _run(false), _gpu_poll(gpu_poll)
 {
   if (prepareMemoryPolling() == false)
     throw std::runtime_error("failed to prepare memory pooling");
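
A note on the unused-symbol fixes above ([[maybe_unused]] lambdas and functions, deleted locals, unnamed lambda parameters): under -Wall -Wextra -Werror, an unused parameter, an unused local, or a helper only referenced inside assert()/EXPECT_DEATH-style debug paths all break the build. A minimal sketch of the three fixes (illustrative only, not code from this patch):

    #include <cassert>

    // Unused parameter: drop the name instead of leaving it to warn.
    int apply(int value, const char * /* label */) { return value + 1; }

    // Only used inside assert(), which vanishes in NDEBUG builds, so mark it.
    [[maybe_unused]] static bool is_positive(int v) { return v > 0; }

    int main()
    {
      int result = apply(41, "answer"); // an unused `int seed = 1;` here would also fail the build
      assert(is_positive(result));
      return result == 42 ? 0 : 1;
    }
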
prepare memory pooling"); diff --git a/tests/libs/nnapi/CMakeLists.txt b/tests/libs/nnapi/CMakeLists.txt index eb48a7beac1..2066ddb16d4 100644 --- a/tests/libs/nnapi/CMakeLists.txt +++ b/tests/libs/nnapi/CMakeLists.txt @@ -7,7 +7,6 @@ set(LIB_NNAPI onert_nnapi) add_library(${LIB_NNAPI} STATIC ${SOURCES_FRONTEND}) target_include_directories(${LIB_NNAPI} PUBLIC include) target_link_libraries(${LIB_NNAPI} PUBLIC onert_core) # TODO Link PRIVATE onert_core -target_link_libraries(${LIB_NNAPI} PRIVATE nnfw_common) target_link_libraries(${LIB_NNAPI} PRIVATE nnfw_coverage) if(NOT ENABLE_TEST) diff --git a/tests/libs/tflite/CMakeLists.txt b/tests/libs/tflite/CMakeLists.txt index 4898dd15fc1..f1ee8baa7ef 100644 --- a/tests/libs/tflite/CMakeLists.txt +++ b/tests/libs/tflite/CMakeLists.txt @@ -14,7 +14,6 @@ target_include_directories(nnfw_lib_tflite PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/in target_link_libraries(nnfw_lib_tflite PUBLIC tensorflow-lite-2.16.1) target_link_libraries(nnfw_lib_tflite PUBLIC nnfw_lib_misc nnfw_lib_benchmark) target_link_libraries(nnfw_lib_tflite PRIVATE ${LIB_PTHREAD} dl) -target_link_libraries(nnfw_lib_tflite PRIVATE nnfw_common) if(NOT ENABLE_TEST) return() diff --git a/tests/libs/tflite/src/TensorView.test.cpp b/tests/libs/tflite/src/TensorView.test.cpp index c710b3c3394..a41b8772829 100644 --- a/tests/libs/tflite/src/TensorView.test.cpp +++ b/tests/libs/tflite/src/TensorView.test.cpp @@ -33,7 +33,7 @@ void int_test(void) assert(view.at(nnfw::misc::tensor::Index{1, 2}) == 6); } -int main(int argc, char **argv) +int main() { float value[6] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}; diff --git a/tests/nnapi/CMakeLists.txt b/tests/nnapi/CMakeLists.txt index b1fd64596f5..460887bb165 100644 --- a/tests/nnapi/CMakeLists.txt +++ b/tests/nnapi/CMakeLists.txt @@ -55,6 +55,9 @@ target_link_libraries(${RUNTIME_NNAPI_TEST} ${LIB_PTHREAD} dl) # Set INSTALL_RPATH to find onert_core set_target_properties(${RUNTIME_NNAPI_TEST} PROPERTIES INSTALL_RPATH "$ORIGIN/../lib/nnfw") +# Ignore strict build warnings +target_link_libraries(${RUNTIME_NNAPI_TEST} nnfw_ease_warning) + install(TARGETS ${RUNTIME_NNAPI_TEST} DESTINATION nnapi-gtest) # Default test backend: cpu diff --git a/tests/nnfw_api/lib/CircleGen.h b/tests/nnfw_api/lib/CircleGen.h index ac95ba9b2e1..6b24ce93be7 100644 --- a/tests/nnfw_api/lib/CircleGen.h +++ b/tests/nnfw_api/lib/CircleGen.h @@ -98,7 +98,7 @@ class CircleGen std::vector shape; circle::TensorType tensor_type = circle::TensorType::TensorType_FLOAT32; uint32_t buffer = 0; - std::string name; + std::string name = ""; }; struct OperatorParams diff --git a/tests/nnfw_api/src/GenModelTests/BranchModelTrain.test.cc b/tests/nnfw_api/src/GenModelTests/BranchModelTrain.test.cc index 1a224ae0645..cb89ec09f14 100644 --- a/tests/nnfw_api/src/GenModelTests/BranchModelTrain.test.cc +++ b/tests/nnfw_api/src/GenModelTests/BranchModelTrain.test.cc @@ -18,9 +18,11 @@ TEST_F(GenModelTrain, BranchOps_FC_Add) { - // (( Input 0 )) -> [ FC ] ----\ - // |=> [ Add ] -> (( Output 0 )) - // (( Input 1 )) --------------/ + /* + (( Input 0 )) -> [ FC ] ----\ + |=> [ Add ] -> (( Output 0 )) + (( Input 1 )) --------------/ + */ { CirclePlusGen cgen; @@ -57,9 +59,11 @@ TEST_F(GenModelTrain, BranchOps_FC_Add) SUCCEED(); } - // (( Input 0 )) -> [ FC ] -> (fc_out) --------------------------╲ - // ╲ |=> [ Add ] -> (( Output 0 )) - // ╲-> [ Relu6 ]⎼> (ea_out) -╱ + /* + (( Input 0 )) -> [ FC ] -> (fc_out) --------------------------╲ + ╲ |=> [ Add ] -> (( Output 0 )) + ╲-> [ Relu6 ]⎼> (ea_out) -╱ + */ { 
CirclePlusGen cgen; @@ -101,9 +105,11 @@ TEST_F(GenModelTrain, BranchOps_FC_Add) TEST_F(GenModelTrain, BranchOps_FC_Sub) { - // (( Input 0 )) --------------\ - // |=> [ Sub ] -> (( Output 0 )) - // (( Input 1 )) -> [ FC ] ----/ + /* + (( Input 0 )) --------------\ + |=> [ Sub ] -> (( Output 0 )) + (( Input 1 )) -> [ FC ] ----/ + */ { CirclePlusGen cgen; @@ -140,9 +146,11 @@ TEST_F(GenModelTrain, BranchOps_FC_Sub) SUCCEED(); } - // (( Input 0 )) -> [ FC ] -> (fc1_out) ------------------------╲ - // ╲ |=> [ Sub ] -> (( Output 0 )) - // ╲-> [ FC ]⎼> (fc2_out) -╱ + /* + (( Input 0 )) -> [ FC ] -> (fc1_out) ------------------------╲ + ╲ |=> [ Sub ] -> (( Output 0 )) + ╲-> [ FC ]⎼> (fc2_out) -╱ + */ { CirclePlusGen cgen; @@ -188,9 +196,11 @@ TEST_F(GenModelTrain, BranchOps_FC_Sub) TEST_F(GenModelTrain, BranchOps_FC_Mul) { - // (( Input 0 )) -> [ FC ] ----\ - // |=> [ Mul ] -> [ FC ] -> (( Output 0 )) - // (( Input 1 )) -> [ FC ] ----/ + /* + (( Input 0 )) -> [ FC ] ----\ + |=> [ Mul ] -> [ FC ] -> (( Output 0 )) + (( Input 1 )) -> [ FC ] ----/ + */ { CirclePlusGen cgen; @@ -239,9 +249,11 @@ TEST_F(GenModelTrain, BranchOps_FC_Mul) SUCCEED(); } - // (( Input 0 )) -> [ FC ] -> (fc1_out) ------------------------╲ - // ╲ |=> [ Mul ] -> (( Output 0 )) - // ╲-> [ FC ]⎼> (fc2_out) -╱ + /* + (( Input 0 )) -> [ FC ] -> (fc1_out) ------------------------╲ + ╲ |=> [ Mul ] -> (( Output 0 )) + ╲-> [ FC ]⎼> (fc2_out) -╱ + */ { CirclePlusGen cgen; diff --git a/tests/nnfw_api/src/GenModelTests/General.test.cc b/tests/nnfw_api/src/GenModelTests/General.test.cc index 2a279dbaa11..6d0f1e409ee 100644 --- a/tests/nnfw_api/src/GenModelTests/General.test.cc +++ b/tests/nnfw_api/src/GenModelTests/General.test.cc @@ -62,11 +62,13 @@ TEST_F(GenModelTest, UnusedConstOutputAndAdd) TEST_F(GenModelTest, UsedConstOutput) { - // (( Input 1 )) ---------\ - // |=> [ Add ] -> (( Output 1 )) - // (( Const Output 2 )) --< - // |=> [ Add ] -> (( Output 0 )) - // (( Input 0 )) ---------/ + /* + (( Input 1 )) ---------\ + |=> [ Add ] -> (( Output 1 )) + (( Const Output 2 )) --< + |=> [ Add ] -> (( Output 0 )) + (( Input 0 )) ---------/ + */ CircleGen cgen; uint32_t rhs_buf = cgen.addBuffer(std::vector{6, 4, 8, 1}); int in0 = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32}); diff --git a/tests/nnfw_api/src/GenModelTests/ModelTestDynamicTensor.test.cc b/tests/nnfw_api/src/GenModelTests/ModelTestDynamicTensor.test.cc index 0f7625bafd3..2ecf6485371 100644 --- a/tests/nnfw_api/src/GenModelTests/ModelTestDynamicTensor.test.cc +++ b/tests/nnfw_api/src/GenModelTests/ModelTestDynamicTensor.test.cc @@ -634,7 +634,6 @@ class CombinationTest1 : public ::testing::Test // test for https://github.com/Samsung/ONE/issues/4625 TEST_F(CombinationTest1, combination_of_set_input_tensorinfo_and_nnfw_run) { - constexpr NNFW_TYPE NNFW_DTYPE = NNFW_TYPE_TENSOR_FLOAT32; std::vector cast_in_buf; std::vector reshape_shape_in_buf; std::vector actual(4), expected(4); @@ -799,8 +798,6 @@ class CombinationTest2 : public ::testing::Test // #1 = input 1 of shape [1] // #2 = add(#0, #1) - constexpr circle::TensorType CIRCLE_DTYPE = circle::TensorType::TensorType_FLOAT32; - int in0 = cgen.addTensor({{1}, circle::TensorType::TensorType_INT32}); int in1 = cgen.addTensor({{1}, circle::TensorType::TensorType_INT32}); int out = cgen.addTensor({{1}, circle::TensorType::TensorType_INT32}); diff --git a/tests/nnfw_api/src/NNPackageTests/AddSessionPrepared.test.cc b/tests/nnfw_api/src/NNPackageTests/AddSessionPrepared.test.cc index 3a93739147f..febe489ff62 
100644 --- a/tests/nnfw_api/src/NNPackageTests/AddSessionPrepared.test.cc +++ b/tests/nnfw_api/src/NNPackageTests/AddSessionPrepared.test.cc @@ -72,7 +72,7 @@ TEST_F(ValidationTestAddSessionPrepared, run_async) TEST_F(ValidationTestAddSessionPrepared, set_input_001) { - char input[32]; + char input[32] = ""; ASSERT_EQ(nnfw_set_input(_session, 0, NNFW_TYPE_TENSOR_FLOAT32, input, sizeof(input)), NNFW_STATUS_NO_ERROR); } @@ -135,14 +135,14 @@ TEST_F(ValidationTestAddSessionPrepared, neg_set_input_001) TEST_F(ValidationTestAddSessionPrepared, neg_set_input_002) { - char input[1]; // buffer size is too small + char input[1] = ""; // buffer size is too small NNFW_ENSURE_SUCCESS(nnfw_set_input(_session, 0, NNFW_TYPE_TENSOR_FLOAT32, input, sizeof(input))); EXPECT_EQ(nnfw_run(_session), NNFW_STATUS_ERROR); } TEST_F(ValidationTestAddSessionPrepared, set_output_001) { - char buffer[32]; + char buffer[32] = ""; ASSERT_EQ(nnfw_set_input(_session, 0, NNFW_TYPE_TENSOR_FLOAT32, buffer, sizeof(buffer)), NNFW_STATUS_NO_ERROR); } @@ -185,7 +185,9 @@ TEST_F(ValidationTestAddSessionPrepared, neg_prepare) TEST_F(ValidationTestAddSessionPrepared, neg_run_without_set_output) { - uint8_t input[4]; + uint8_t input[4] = { + 0, + }; NNFW_ENSURE_SUCCESS(nnfw_set_input(_session, 0, NNFW_TYPE_TENSOR_FLOAT32, input, sizeof(input))); // `nnfw_set_output()` is not called ASSERT_EQ(nnfw_run(_session), NNFW_STATUS_ERROR); diff --git a/tests/nnfw_api/src/NNPackageTests/SingleSession.test.cc b/tests/nnfw_api/src/NNPackageTests/SingleSession.test.cc index fbc6c8a94de..5a9d4caa77b 100644 --- a/tests/nnfw_api/src/NNPackageTests/SingleSession.test.cc +++ b/tests/nnfw_api/src/NNPackageTests/SingleSession.test.cc @@ -52,7 +52,7 @@ TEST_F(ValidationTestSingleSession, neg_set_input_001) TEST_F(ValidationTestSingleSession, neg_set_input_002) { - char input[32]; + char input[32] = ""; ASSERT_EQ(nnfw_set_input(nullptr, 0, NNFW_TYPE_TENSOR_FLOAT32, input, sizeof(input)), NNFW_STATUS_UNEXPECTED_NULL); } @@ -66,7 +66,7 @@ TEST_F(ValidationTestSingleSession, neg_set_output_001) TEST_F(ValidationTestSingleSession, neg_set_output_002) { - char buffer[32]; + char buffer[32] = ""; ASSERT_EQ(nnfw_set_output(nullptr, 0, NNFW_TYPE_TENSOR_FLOAT32, buffer, sizeof(buffer)), NNFW_STATUS_UNEXPECTED_NULL); } diff --git a/tests/tools/onert_run/src/args.cc b/tests/tools/onert_run/src/args.cc index f44ed56769f..b7faaef15ba 100644 --- a/tests/tools/onert_run/src/args.cc +++ b/tests/tools/onert_run/src/args.cc @@ -336,7 +336,7 @@ void Args::Parse(const int argc, char **argv) _fixed_input = _arser.get("--fixed_input"); _force_float = _arser.get("--force_float"); _warmup_runs = _arser.get("--warmup_runs"); - _minmax_runs = _arser.get("--minmax_runs"); + _minmax_runs = _arser.get("--minmax_runs"); _run_delay = _arser.get("--run_delay"); _gpumem_poll = _arser.get("--gpumem_poll"); _mem_poll = _arser.get("--mem_poll"); diff --git a/tests/tools/onert_run/src/args.h b/tests/tools/onert_run/src/args.h index a55cd627a82..acc2c445b47 100644 --- a/tests/tools/onert_run/src/args.h +++ b/tests/tools/onert_run/src/args.h @@ -47,7 +47,7 @@ class Args const std::string &getPackageFilename(void) const { return _package_filename; } const std::string &getModelFilename(void) const { return _model_filename; } - const bool useSingleModel(void) const { return _use_single_model; } + bool useSingleModel(void) const { return _use_single_model; } #if defined(ONERT_HAVE_HDF5) && ONERT_HAVE_HDF5 == 1 const std::string &getDumpFilename(void) const { return _dump_filename; } const 
diff --git a/tests/tools/onert_run/src/args.h b/tests/tools/onert_run/src/args.h
index a55cd627a82..acc2c445b47 100644
--- a/tests/tools/onert_run/src/args.h
+++ b/tests/tools/onert_run/src/args.h
@@ -47,7 +47,7 @@ class Args
   const std::string &getPackageFilename(void) const { return _package_filename; }
   const std::string &getModelFilename(void) const { return _model_filename; }
-  const bool useSingleModel(void) const { return _use_single_model; }
+  bool useSingleModel(void) const { return _use_single_model; }
 #if defined(ONERT_HAVE_HDF5) && ONERT_HAVE_HDF5 == 1
   const std::string &getDumpFilename(void) const { return _dump_filename; }
   const std::string &getLoadFilename(void) const { return _load_filename; }
@@ -56,23 +56,23 @@ class Args
   const std::string &getDumpRawFilename(void) const { return _dump_raw_filename; }
   const std::string &getDumpRawInputFilename(void) const { return _dump_raw_input_filename; }
   const std::string &getLoadRawFilename(void) const { return _load_raw_filename; }
-  const int getNumRuns(void) const { return _num_runs; }
-  const bool getFixedInput(void) const { return _fixed_input; }
-  const bool getForceFloat(void) const { return _force_float; }
-  const int getWarmupRuns(void) const { return _warmup_runs; }
-  const int getMinmaxRuns(void) const { return _minmax_runs; }
-  const int getRunDelay(void) const { return _run_delay; }
+  int getNumRuns(void) const { return _num_runs; }
+  bool getFixedInput(void) const { return _fixed_input; }
+  bool getForceFloat(void) const { return _force_float; }
+  int getWarmupRuns(void) const { return _warmup_runs; }
+  uint32_t getMinmaxRuns(void) const { return _minmax_runs; }
+  int getRunDelay(void) const { return _run_delay; }
   std::unordered_map<uint32_t, size_t> getOutputSizes(void) const { return _output_sizes; }
-  const bool getGpuMemoryPoll(void) const { return _gpumem_poll; }
-  const bool getMemoryPoll(void) const { return _mem_poll; }
-  const bool getWriteReport(void) const { return _write_report; }
-  const bool printVersion(void) const { return _print_version; }
+  bool getGpuMemoryPoll(void) const { return _gpumem_poll; }
+  bool getMemoryPoll(void) const { return _mem_poll; }
+  bool getWriteReport(void) const { return _write_report; }
+  bool printVersion(void) const { return _print_version; }
   TensorShapeMap &getShapeMapForPrepare() { return _shape_prepare; }
   TensorShapeMap &getShapeMapForRun() { return _shape_run; }
   TensorShapeMap &getOutputShapeMap() { return _output_shape; }
   /// @brief Return true if "--shape_run" or "--shape_prepare" is provided
   bool shapeParamProvided();
-  const int getVerboseLevel(void) const { return _verbose_level; }
+  int getVerboseLevel(void) const { return _verbose_level; }
   const std::string &getQuantize(void) const { return _quantize; }
   const std::string &getQuantizedModelPath(void) const { return _quantized_model_path; }
   const std::string &getCodegen(void) const { return _codegen; }
@@ -102,7 +102,7 @@ class Args
   bool _fixed_input = false;
   bool _force_float = false;
   int _warmup_runs;
-  int _minmax_runs;
+  uint32_t _minmax_runs;
   int _run_delay;
   std::unordered_map<uint32_t, size_t> _output_sizes;
   bool _gpumem_poll;
diff --git a/tests/tools/onert_run/src/h5formatter.cc b/tests/tools/onert_run/src/h5formatter.cc
index bef7e44dec1..9265331272b 100644
--- a/tests/tools/onert_run/src/h5formatter.cc
+++ b/tests/tools/onert_run/src/h5formatter.cc
@@ -96,7 +96,6 @@ void H5Formatter::loadInputs(const std::string &filename, std::vector<Allocation> &inputs)
diff --git a/tests/tools/onert_run/src/nnfw_util.cc b/tests/tools/onert_run/src/nnfw_util.cc
--- a/tests/tools/onert_run/src/nnfw_util.cc
+++ b/tests/tools/onert_run/src/nnfw_util.cc
@@ -27,7 +27,7 @@ namespace onert_run
 uint64_t num_elems(const nnfw_tensorinfo *ti)
 {
   uint64_t n = 1;
-  for (uint32_t i = 0; i < ti->rank; ++i)
+  for (int32_t i = 0; i < ti->rank; ++i)
   {
     assert(ti->dims[i] >= 0);
     n *= ti->dims[i];
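The getter changes in args.h above are all one fix: a top-level `const` on a by-value return type is discarded by the language, so `const bool f()` and `bool f()` declare the same function, and -Wignored-qualifiers (enabled by the strict build) rejects the former. The qualifier survives only where it applies to the referenced object. Sketch:

#include <string>

// const bool enabled() { return true; } // -Wignored-qualifiers: qualifier is dropped
bool enabled() { return true; }          // same meaning, no warning

const std::string &name() // const is meaningful here: it qualifies the referee
{
  static const std::string n = "onert";
  return n;
}

int main() { return (enabled() && !name().empty()) ? 0 : 1; }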
diff --git a/tests/tools/onert_run/src/onert_run.cc b/tests/tools/onert_run/src/onert_run.cc
index 1fb452d6783..9b9e69bcfc6 100644
--- a/tests/tools/onert_run/src/onert_run.cc
+++ b/tests/tools/onert_run/src/onert_run.cc
@@ -68,9 +68,9 @@ std::string genQuantizedModelPathFromModelPath(const std::string &model_path,
       return model_path.substr(0, extension_pos) + "_quantized_q8wo.circle";
     case NNFW_QUANTIZE_TYPE_WO_I16_SYM:
       return model_path.substr(0, extension_pos) + "_quantized_q16wo.circle";
+    default:
+      throw std::runtime_error{"Invalid quantization type"};
   }
-
-  throw std::runtime_error{"Invalid quantization type"};
 }
 
 std::string genQuantizedModelPathFromPackagePath(const std::string &package_path,
@@ -95,9 +95,9 @@ std::string genQuantizedModelPathFromPackagePath(const std::string &package_path,
       return package_path_without_slash + "/" + package_name + "_quantized_q8wo.circle";
     case NNFW_QUANTIZE_TYPE_WO_I16_SYM:
       return package_path_without_slash + "/" + package_name + "_quantized_q16wo.circle";
+    default:
+      throw std::runtime_error{"Invalid quantization type"};
   }
-
-  throw std::runtime_error{"Invalid quantization type"};
 }
 
 int main(const int argc, char **argv)
@@ -267,7 +267,7 @@ int main(const int argc, char **argv)
     {
       auto &shape = found->second;
       bool set_input = false;
-      if (ti.rank != shape.size())
+      if (ti.rank != static_cast<int32_t>(shape.size()))
      {
        set_input = true;
      }
@@ -454,7 +454,7 @@ int main(const int argc, char **argv)
      else
      {
        TensorShape shape;
-        for (uint32_t j = 0; j < ti.rank; j++)
+        for (int32_t j = 0; j < ti.rank; j++)
          shape.emplace_back(ti.dims[j]);
 
        output_shapes.emplace_back(shape);
diff --git a/tests/tools/onert_run/src/rawformatter.cc b/tests/tools/onert_run/src/rawformatter.cc
index 8dda6e00940..bb4a99f0608 100644
--- a/tests/tools/onert_run/src/rawformatter.cc
+++ b/tests/tools/onert_run/src/rawformatter.cc
@@ -42,7 +42,7 @@ void RawFormatter::loadInputs(const std::string &filename, std::vector<Allocation> &inputs)
-    if (bufsz != filesz)
+    if (bufsz != static_cast<uint64_t>(filesz))
     {
       throw std::runtime_error("Input " + std::to_string(i) + " size does not match: " + std::to_string(bufsz) +
diff --git a/tests/tools/onert_train/src/args.cc b/tests/tools/onert_train/src/args.cc
index 46f5a6121f4..e55de35450e 100644
--- a/tests/tools/onert_train/src/args.cc
+++ b/tests/tools/onert_train/src/args.cc
@@ -101,7 +101,7 @@ void checkPackage(const std::string &package_filename)
 
 // check the value is in the valid_args list and return the corresponded enum
 template <typename T>
-T checkValidation(const std::string &arg_name, const std::vector<T> &valid_args, int value)
+T checkValidation(const std::string &arg_name, const std::vector<T> &valid_args, uint32_t value)
 {
   for (const auto arg : valid_args)
   {
@@ -278,12 +278,13 @@ void Args::Parse(const int argc, char **argv)
   if (_arser["--learning_rate"])
     _learning_rate = _arser.get<float>("--learning_rate");
   if (_arser["--loss"])
-    _loss_type = checkValidation("loss", valid_loss, _arser.get<int>("--loss"));
+    _loss_type = checkValidation("loss", valid_loss, _arser.get<uint32_t>("--loss"));
   if (_arser["--loss_reduction_type"])
     _loss_reduction_type = checkValidation("loss_reduction_type", valid_loss_rdt,
-                                           _arser.get<int>("--loss_reduction_type"));
+                                           _arser.get<uint32_t>("--loss_reduction_type"));
   if (_arser["--optimizer"])
-    _optimizer_type = checkValidation("optimizer", valid_optim, _arser.get<int>("--optimizer"));
+    _optimizer_type =
+      checkValidation("optimizer", valid_optim, _arser.get<uint32_t>("--optimizer"));
 
   _metric_type = _arser.get<int>("--metric");
   _validation_split = _arser.get<float>("--validation_split");
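Folding the trailing `throw` into a `default:` label in onert_run.cc keeps behavior identical, every path still returns or throws, while giving the switch an explicit default case, which strict options such as -Wswitch-default demand. A reduced sketch with a hypothetical enum (not the real NNFW_QUANTIZE_TYPE list):

#include <stdexcept>
#include <string>

enum class QuantType { U8_ASYM, I16_SYM }; // hypothetical, reduced

std::string suffixFor(QuantType t)
{
  switch (t)
  {
    case QuantType::U8_ASYM:
      return "_quantized_q8.circle";
    case QuantType::I16_SYM:
      return "_quantized_q16.circle";
    default:
      // Every control path visibly returns or throws, and the switch now has
      // a default case for -Wswitch-default.
      throw std::runtime_error{"Invalid quantization type"};
  }
}

int main() { return suffixFor(QuantType::U8_ASYM).empty() ? 1 : 0; }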
diff --git a/tests/tools/onert_train/src/args.h b/tests/tools/onert_train/src/args.h
index 86ad3ff0427..f9c4ff260fa 100644
--- a/tests/tools/onert_train/src/args.h
+++ b/tests/tools/onert_train/src/args.h
@@ -53,11 +53,11 @@ class Args
   const std::string &getExportCircleFilename(void) const { return _export_circle_filename; }
   const std::string &getExportCirclePlusFilename(void) const { return _export_circleplus_filename; }
   const std::string &getExportCheckpointFilename(void) const { return _export_checkpoint_filename; }
-  const bool useSingleModel(void) const { return _use_single_model; }
+  bool useSingleModel(void) const { return _use_single_model; }
   const std::string &getLoadRawInputFilename(void) const { return _load_raw_input_filename; }
   const std::string &getLoadRawExpectedFilename(void) const { return _load_raw_expected_filename; }
-  const bool getMemoryPoll(void) const { return _mem_poll; }
-  const int getEpoch(void) const { return _epoch; }
+  bool getMemoryPoll(void) const { return _mem_poll; }
+  int getEpoch(void) const { return _epoch; }
   const std::optional<int> getBatchSize(void) const { return _batch_size; }
   const std::optional<float> getLearningRate(void) const { return _learning_rate; }
   const std::optional<NNFW_TRAIN_LOSS> getLossType(void) const { return _loss_type; }
@@ -66,10 +66,10 @@ class Args
     return _loss_reduction_type;
   }
   const std::optional<NNFW_TRAIN_OPTIMIZER> getOptimizerType(void) const { return _optimizer_type; }
-  const int getMetricType(void) const { return _metric_type; }
-  const float getValidationSplit(void) const { return _validation_split; }
-  const bool printVersion(void) const { return _print_version; }
-  const int getVerboseLevel(void) const { return _verbose_level; }
+  int getMetricType(void) const { return _metric_type; }
+  float getValidationSplit(void) const { return _validation_split; }
+  bool printVersion(void) const { return _print_version; }
+  int getVerboseLevel(void) const { return _verbose_level; }
   std::unordered_map<uint32_t, size_t> getOutputSizes(void) const { return _output_sizes; }
   uint32_t num_of_trainable_ops(void) const { return _num_of_trainable_ops; }
diff --git a/tests/tools/onert_train/src/formatter.h b/tests/tools/onert_train/src/formatter.h
index 6d256804e72..a328b075b53 100644
--- a/tests/tools/onert_train/src/formatter.h
+++ b/tests/tools/onert_train/src/formatter.h
@@ -34,7 +34,7 @@ class Formatter
   Formatter(nnfw_session *sess) : session_(sess) {}
   virtual void loadInputs(const std::string &filename, std::vector<Allocation> &inputs) = 0;
   virtual void dumpOutputs(const std::string &filename, std::vector<Allocation> &outputs) = 0;
-  virtual std::vector<TensorShape> readTensorShapes(const std::string &filename)
+  virtual std::vector<TensorShape> readTensorShapes(const std::string &)
   {
     return std::vector<TensorShape>();
   };
diff --git a/tests/tools/onert_train/src/h5formatter.cc b/tests/tools/onert_train/src/h5formatter.cc
index 12c570b5dee..c7d35485389 100644
--- a/tests/tools/onert_train/src/h5formatter.cc
+++ b/tests/tools/onert_train/src/h5formatter.cc
@@ -180,7 +180,7 @@ void H5Formatter::dumpOutputs(const std::string &filename, std::vector<Allocation> &outputs)
           std::vector<hsize_t> dims(ti.rank);
-          for (uint32_t j = 0; j < ti.rank; ++j)
+          for (int32_t j = 0; j < ti.rank; ++j)
           {
             if (ti.dims[j] >= 0)
               dims[j] = static_cast<hsize_t>(ti.dims[j]);
diff --git a/tests/tools/onert_train/src/measure.h b/tests/tools/onert_train/src/measure.h
index ab9be905233..95d7864e687 100644
--- a/tests/tools/onert_train/src/measure.h
+++ b/tests/tools/onert_train/src/measure.h
@@ -161,7 +161,7 @@ class Measure
     }
   }
 
-  void run(const int epoch, const int step, const std::function<void()> &func)
+  void run(const unsigned int epoch, const unsigned int step, const std::function<void()> &func)
   {
     if (_step_results.empty() || _step_results.size() <= epoch ||
         _step_results[epoch].size() <= step)
@@ -184,7 +184,7 @@ class Measure
     return sum;
   }
 
-  double timeMicros(const int epoch, const AggregateType aggType)
+  double timeMicros(const unsigned int epoch, const AggregateType aggType)
   {
     if (_step_results.empty() || _step_results.size() <= epoch)
     {
@@ -218,7 +218,7 @@ class Measure
                 << _phase_results[type].time / 1e3 << " ms" << std::endl;
       if (i == PhaseType::EXECUTE)
       {
-        for (int j = 0; j < _step_results.size(); ++j)
+        for (uint32_t j = 0; j < _step_results.size(); ++j)
         {
           std::cout << "- " << "Epoch " << j + 1 << std::setw(12) << std::right << " takes "
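The formatter.h change above shows the quiet fix for -Wunused-parameter: a parameter that a base-class stub ignores keeps its type but loses its name, so the signature and every override stay untouched. Sketch with hypothetical names:

#include <string>
#include <vector>

struct ShapeReader // hypothetical, reduced from the Formatter interface
{
  // Unnamed parameter: the stub ignores it, and -Wunused-parameter stays quiet.
  virtual std::vector<int> readShapes(const std::string &) { return {}; }
  virtual ~ShapeReader() = default;
};

int main()
{
  ShapeReader r;
  return r.readShapes("input.h5").empty() ? 0 : 1;
}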
diff --git a/tests/tools/onert_train/src/metrics.cc b/tests/tools/onert_train/src/metrics.cc
index 1cdb05877f9..5c802ad62f0 100644
--- a/tests/tools/onert_train/src/metrics.cc
+++ b/tests/tools/onert_train/src/metrics.cc
@@ -35,7 +35,7 @@ float Metrics::categoricalAccuracy(const T *output, const T *expected, uint32_t
                                    uint64_t size)
 {
   int correct = 0;
-  for (int b = 0; b < batch; ++b)
+  for (uint32_t b = 0; b < batch; ++b)
   {
     int begin_offset = b * size;
     int end_offset = begin_offset + size;
diff --git a/tests/tools/onert_train/src/nnfw_util.cc b/tests/tools/onert_train/src/nnfw_util.cc
index 07d3f18dd76..ff80122064f 100644
--- a/tests/tools/onert_train/src/nnfw_util.cc
+++ b/tests/tools/onert_train/src/nnfw_util.cc
@@ -27,7 +27,7 @@ namespace onert_train
 uint64_t num_elems(const nnfw_tensorinfo *ti)
 {
   uint64_t n = 1;
-  for (uint32_t i = 0; i < ti->rank; ++i)
+  for (int32_t i = 0; i < ti->rank; ++i)
   {
     assert(ti->dims[i] >= 0);
     n *= ti->dims[i];
diff --git a/tests/tools/onert_train/src/onert_train.cc b/tests/tools/onert_train/src/onert_train.cc
index 62d273624c6..56e66b691f1 100644
--- a/tests/tools/onert_train/src/onert_train.cc
+++ b/tests/tools/onert_train/src/onert_train.cc
@@ -56,7 +56,7 @@ int main(const int argc, char **argv)
   }
 
   // TODO Apply verbose level to phases
-  const int verbose = args.getVerboseLevel();
+  // const int verbose = args.getVerboseLevel();
 
   // prepare measure tool
   Measure measure(args.getMemoryPoll());
@@ -244,7 +244,7 @@ int main(const int argc, char **argv)
   const int num_step = tdata_length / tri.batch_size;
   const int num_epoch = args.getEpoch();
   measure.set(num_epoch, num_step);
-  for (uint32_t epoch = 0; epoch < num_epoch; ++epoch)
+  for (int32_t epoch = 0; epoch < num_epoch; ++epoch)
   {
     //
     // TRAINING
@@ -252,7 +252,7 @@ int main(const int argc, char **argv)
     {
       std::fill(losses.begin(), losses.end(), 0);
       std::fill(metrics.begin(), metrics.end(), 0);
-      for (uint32_t n = 0; n < num_step; ++n)
+      for (int32_t n = 0; n < num_step; ++n)
       {
         // get batchsize data
         if (!tdata_generator(n, input_data, expected_data))
@@ -277,7 +277,7 @@ int main(const int argc, char **argv)
 
         // store loss
         Metrics metric(output_data, expected_data, expected_infos);
-        for (int32_t i = 0; i < num_expecteds; ++i)
+        for (uint32_t i = 0; i < num_expecteds; ++i)
         {
           float temp = 0.f;
           NNPR_ENSURE_STATUS(nnfw_train_get_loss(session, i, &temp));
@@ -320,7 +320,7 @@ int main(const int argc, char **argv)
       std::fill(losses.begin(), losses.end(), 0);
       std::fill(metrics.begin(), metrics.end(), 0);
       const int num_valid_step = vdata_length / tri.batch_size;
-      for (uint32_t n = 0; n < num_valid_step; ++n)
+      for (int32_t n = 0; n < num_valid_step; ++n)
      {
         // get batchsize validation data
         if (!vdata_generator(n, input_data, expected_data))
@@ -345,7 +345,7 @@ int main(const int argc, char **argv)
 
         // get validation loss and accuracy
         Metrics metric(output_data, expected_data, expected_infos);
-        for (int32_t i = 0; i < num_expecteds; ++i)
+        for (uint32_t i = 0; i < num_expecteds; ++i)
         {
           float temp = 0.f;
           NNPR_ENSURE_STATUS(nnfw_train_get_loss(session, i, &temp));
diff --git a/tests/tools/onert_train/src/rawformatter.cc b/tests/tools/onert_train/src/rawformatter.cc
index a17071684f1..c94e4ffb7ae 100644
--- a/tests/tools/onert_train/src/rawformatter.cc
+++ b/tests/tools/onert_train/src/rawformatter.cc
@@ -49,7 +49,7 @@ void RawFormatter::loadInputs(const std::string &filename, std::vector<Allocation> &inputs)
-    if (bufsz != filesz)
+    if (bufsz != static_cast<uint64_t>(filesz))
     {
       throw std::runtime_error("Input " + std::to_string(i) + " size does not match: " + std::to_string(bufsz) +
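Most of the loop-index edits above resolve -Wsign-compare: `nnfw_tensorinfo::rank` and the run counts are signed while container sizes are unsigned, and the fix is to make each comparison one-typed, either by casting one side explicitly or by declaring the index with the type of its bound. A self-contained sketch; the struct is a stand-in for the real tensor info, not the nnfw API:

#include <cstdint>
#include <vector>

struct TensorInfoStub // stand-in for nnfw_tensorinfo, whose rank is signed
{
  int32_t rank;
  int32_t dims[6];
};

bool rankMatches(const TensorInfoStub &ti, const std::vector<int> &shape)
{
  // Convert the unsigned size() to the signed rank type before comparing.
  return ti.rank == static_cast<int32_t>(shape.size());
}

int64_t elementCount(const TensorInfoStub &ti)
{
  int64_t n = 1;
  for (int32_t i = 0; i < ti.rank; ++i) // index type matches the signed bound
    n *= ti.dims[i];
  return n;
}

int main()
{
  const TensorInfoStub ti{2, {2, 3, 0, 0, 0, 0}};
  return (rankMatches(ti, {1, 2}) && elementCount(ti) == 6) ? 0 : 1;
}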
diff --git a/tests/tools/onert_train/test/rawdataloader.test.cc b/tests/tools/onert_train/test/rawdataloader.test.cc
index 02640141dda..7142f65c190 100644
--- a/tests/tools/onert_train/test/rawdataloader.test.cc
+++ b/tests/tools/onert_train/test/rawdataloader.test.cc
@@ -196,7 +196,7 @@ TEST_F(RawDataLoaderTest, loadDatas_1)
   uint32_t num_sample = data_length / batch_size;
   for (uint32_t i = 0; i < num_sample; ++i)
   {
-    auto data = generator(i, inputs, expecteds);
+    generator(i, inputs, expecteds);
 
     std::vector<std::vector<float>> gen_in(num_input);
     for (uint32_t h = 0; h < num_input; ++h)
diff --git a/tests/tools/tflite_comparator/src/tflite_comparator.cc b/tests/tools/tflite_comparator/src/tflite_comparator.cc
index 74316960f86..ea58d007363 100644
--- a/tests/tools/tflite_comparator/src/tflite_comparator.cc
+++ b/tests/tools/tflite_comparator/src/tflite_comparator.cc
@@ -89,7 +89,7 @@ void randomBoolData(benchmark::RandomGenerator &randgen, std::vector<uint8_t> &dest)
 inline uint64_t num_elems(const nnfw_tensorinfo *ti)
 {
   uint64_t n = 1;
-  for (uint32_t i = 0; i < ti->rank; ++i)
+  for (int32_t i = 0; i < ti->rank; ++i)
   {
     n *= ti->dims[i];
   }
@@ -172,7 +172,7 @@ template <> bool isClose(const uint8_t *ref_buf, const std::vector<uint8_t> &act_buf, uint32_t index)
 {
   // TODO better way for handling quant error?
-  auto tolerance = static_cast<int>(nnfw::misc::EnvVar("TOLERANCE").asInt(0));
+  auto tolerance = nnfw::misc::EnvVar("TOLERANCE").asInt(0);
 
   bool match = true;
   for (uint32_t e = 0; e < act_buf.size() / sizeof(uint8_t); e++)
@@ -294,6 +294,7 @@ int main(const int argc, char **argv)
         break;
       case NNFW_TYPE_TENSOR_QUANT16_SYMM_SIGNED:
         randomData<int16_t>(randgen, inputs[i]);
+        break;
       case NNFW_TYPE_TENSOR_FLOAT32:
         randomData<float>(randgen, inputs[i]);
         break;
diff --git a/tests/tools/tflite_run/src/args.h b/tests/tools/tflite_run/src/args.h
index 9360e4eda31..761ed488da1 100644
--- a/tests/tools/tflite_run/src/args.h
+++ b/tests/tools/tflite_run/src/args.h
@@ -34,13 +34,13 @@ class Args
   const std::string &getCompareFilename(void) const { return _compare_filename; }
   const std::string &getInputFilename(void) const { return _input_filename; }
   const std::vector<TensorShape> &getInputShapes(void) const { return _input_shapes; }
-  const int getNumRuns(void) const { return _num_runs; }
-  const int getWarmupRuns(void) const { return _warmup_runs; }
-  const int getRunDelay(void) const { return _run_delay; }
-  const bool getGpuMemoryPoll(void) const { return _gpumem_poll; }
-  const bool getMemoryPoll(void) const { return _mem_poll; }
-  const bool getWriteReport(void) const { return _write_report; }
-  const int getVerboseLevel(void) const { return _verbose_level; }
+  int getNumRuns(void) const { return _num_runs; }
+  int getWarmupRuns(void) const { return _warmup_runs; }
+  int getRunDelay(void) const { return _run_delay; }
+  bool getGpuMemoryPoll(void) const { return _gpumem_poll; }
+  bool getMemoryPoll(void) const { return _mem_poll; }
+  bool getWriteReport(void) const { return _write_report; }
+  int getVerboseLevel(void) const { return _verbose_level; }
 
 private:
   void Initialize();
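The added `break` in tflite_comparator.cc above fixes a real bug rather than a style issue: without it, the QUANT16 case filled the input and then fell through into the FLOAT32 case, which immediately overwrote it with float data. -Wimplicit-fallthrough flags exactly this pattern. A reduced sketch with a hypothetical enum:

#include <cstdio>

enum class DataType { Q16, F32 }; // hypothetical, reduced

void fill(DataType t)
{
  switch (t)
  {
    case DataType::Q16:
      std::puts("fill int16 data");
      break; // without this break, Q16 also runs the F32 branch;
             // -Wimplicit-fallthrough reports the missing break
    case DataType::F32:
      std::puts("fill float data");
      break;
  }
}

int main()
{
  fill(DataType::Q16); // prints only "fill int16 data"
  return 0;
}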
diff --git a/tests/tools/tflite_run/src/tensor_loader.cc b/tests/tools/tflite_run/src/tensor_loader.cc
index ebd64470d7f..c82fdf239e0 100644
--- a/tests/tools/tflite_run/src/tensor_loader.cc
+++ b/tests/tools/tflite_run/src/tensor_loader.cc
@@ -67,7 +67,7 @@ void TensorLoader::loadRawInputTensors(const std::string &filename)
   file.read(reinterpret_cast<char *>(_raw_data.get()), file_size);
   file.close();
 
-  size_t read_bytes = loadInputTensorsFromRawData();
+  [[maybe_unused]] size_t read_bytes = loadInputTensorsFromRawData();
 
   // The file size and total output tensor size must match
   assert(file_size == read_bytes * sizeof(float));
diff --git a/tests/tools/tflite_run/src/tflite_run.cc b/tests/tools/tflite_run/src/tflite_run.cc
index 4aa4a3ff202..52041ae8cec 100644
--- a/tests/tools/tflite_run/src/tflite_run.cc
+++ b/tests/tools/tflite_run/src/tflite_run.cc
@@ -100,7 +100,7 @@ int main(const int argc, char **argv)
   if (args.getInputShapes().size() != 0)
   {
     const auto dim_values = args.getInputShapes().size();
-    int32_t offset = 0;
+    uint32_t offset = 0;
 
     auto const input_count = TfLiteInterpreterGetInputTensorCount(interpreter);
     for (int32_t id = 0; id < input_count; id++)
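Two idioms for temporarily unused values appear in this patch: `verbose` in onert_train.cc is simply commented out, while `read_bytes` in tensor_loader.cc keeps its initializer under [[maybe_unused]] because the value feeds an assert() that vanishes in release builds. A sketch of the trade-offs, with a hypothetical helper:

int computeLevel() { return 1; } // hypothetical stand-in

int main()
{
  // 1. Comment the declaration out: nothing is evaluated anymore.
  // const int verbose = computeLevel();

  // 2. [[maybe_unused]] keeps the call (and any side effects) while silencing
  //    -Wunused-variable; the right choice when the value is used only
  //    conditionally, e.g. inside assert() in debug builds.
  [[maybe_unused]] const int level = computeLevel();

  // 3. Explicitly discard when only the side effect matters.
  (void)computeLevel();

  return 0;
}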