From 4641cbd4c24169c7ecd7a348f3c5ebaa82e76b96 Mon Sep 17 00:00:00 2001
From: Hyeongseok Oh
Date: Thu, 2 Jan 2025 14:56:11 +0900
Subject: [PATCH] [onert/test] Apply strict build to onert_run

This commit applies strict build to onert_run.

ONE-DCO-1.0-Signed-off-by: Hyeongseok Oh
---
 tests/tools/onert_run/CMakeLists.txt      |  1 +
 tests/tools/onert_run/src/args.cc         | 10 +++----
 tests/tools/onert_run/src/args.h          | 34 +++++++++++------------
 tests/tools/onert_run/src/h5formatter.cc  |  1 -
 tests/tools/onert_run/src/nnfw_util.cc    |  4 +--
 tests/tools/onert_run/src/onert_run.cc    | 18 ++++++------
 tests/tools/onert_run/src/rawformatter.cc | 29 ++++++++++---------
 tests/tools/onert_run/src/rawformatter.h  |  6 ++--
 8 files changed, 53 insertions(+), 50 deletions(-)

diff --git a/tests/tools/onert_run/CMakeLists.txt b/tests/tools/onert_run/CMakeLists.txt
index ebbc9551ce7..edabe7e48eb 100644
--- a/tests/tools/onert_run/CMakeLists.txt
+++ b/tests/tools/onert_run/CMakeLists.txt
@@ -27,6 +27,7 @@ endif(HDF5_FOUND)
 
 target_include_directories(onert_run PRIVATE src)
 
+target_link_libraries(onert_run nnfw_common)
 target_link_libraries(onert_run nnfw_lib_tflite jsoncpp)
 target_link_libraries(onert_run nnfw-dev)
 target_link_libraries(onert_run arser)
diff --git a/tests/tools/onert_run/src/args.cc b/tests/tools/onert_run/src/args.cc
index f44ed56769f..0be22b34e59 100644
--- a/tests/tools/onert_run/src/args.cc
+++ b/tests/tools/onert_run/src/args.cc
@@ -332,12 +332,12 @@ void Args::Parse(const int argc, char **argv)
     }
   }
 
-  _num_runs = _arser.get<int>("--num_runs");
+  _num_runs = _arser.get<int32_t>("--num_runs");
   _fixed_input = _arser.get<bool>("--fixed_input");
   _force_float = _arser.get<bool>("--force_float");
-  _warmup_runs = _arser.get<int>("--warmup_runs");
-  _minmax_runs = _arser.get<int>("--minmax_runs");
-  _run_delay = _arser.get<int>("--run_delay");
+  _warmup_runs = _arser.get<int32_t>("--warmup_runs");
+  _minmax_runs = _arser.get<int32_t>("--minmax_runs");
+  _run_delay = _arser.get<int32_t>("--run_delay");
   _gpumem_poll = _arser.get<bool>("--gpumem_poll");
   _mem_poll = _arser.get<bool>("--mem_poll");
   _write_report = _arser.get<bool>("--write_report");
@@ -398,7 +398,7 @@ void Args::Parse(const int argc, char **argv)
     }
   }
 
-  _verbose_level = _arser.get<int>("--verbose_level");
+  _verbose_level = _arser.get<int32_t>("--verbose_level");
 
   if (_arser["--quantize"])
     _quantize = _arser.get<std::string>("--quantize");
diff --git a/tests/tools/onert_run/src/args.h b/tests/tools/onert_run/src/args.h
index a55cd627a82..821ecd0888b 100644
--- a/tests/tools/onert_run/src/args.h
+++ b/tests/tools/onert_run/src/args.h
@@ -47,7 +47,7 @@ class Args
 
   const std::string &getPackageFilename(void) const { return _package_filename; }
   const std::string &getModelFilename(void) const { return _model_filename; }
-  const bool useSingleModel(void) const { return _use_single_model; }
+  bool useSingleModel(void) const { return _use_single_model; }
 #if defined(ONERT_HAVE_HDF5) && ONERT_HAVE_HDF5 == 1
   const std::string &getDumpFilename(void) const { return _dump_filename; }
   const std::string &getLoadFilename(void) const { return _load_filename; }
@@ -56,23 +56,23 @@ class Args
   const std::string &getDumpRawFilename(void) const { return _dump_raw_filename; }
   const std::string &getDumpRawInputFilename(void) const { return _dump_raw_input_filename; }
   const std::string &getLoadRawFilename(void) const { return _load_raw_filename; }
-  const int getNumRuns(void) const { return _num_runs; }
-  const bool getFixedInput(void) const { return _fixed_input; }
-  const bool getForceFloat(void) const { return _force_float; }
-  const int getWarmupRuns(void) const { return _warmup_runs; }
-  const int getMinmaxRuns(void) const { return _minmax_runs; }
-  const int getRunDelay(void) const { return _run_delay; }
+  int32_t getNumRuns(void) const { return _num_runs; }
+  bool getFixedInput(void) const { return _fixed_input; }
+  bool getForceFloat(void) const { return _force_float; }
+  int32_t getWarmupRuns(void) const { return _warmup_runs; }
+  int32_t getMinmaxRuns(void) const { return _minmax_runs; }
+  int32_t getRunDelay(void) const { return _run_delay; }
   std::unordered_map<uint32_t, uint32_t> getOutputSizes(void) const { return _output_sizes; }
-  const bool getGpuMemoryPoll(void) const { return _gpumem_poll; }
-  const bool getMemoryPoll(void) const { return _mem_poll; }
-  const bool getWriteReport(void) const { return _write_report; }
-  const bool printVersion(void) const { return _print_version; }
+  bool getGpuMemoryPoll(void) const { return _gpumem_poll; }
+  bool getMemoryPoll(void) const { return _mem_poll; }
+  bool getWriteReport(void) const { return _write_report; }
+  bool printVersion(void) const { return _print_version; }
   TensorShapeMap &getShapeMapForPrepare() { return _shape_prepare; }
   TensorShapeMap &getShapeMapForRun() { return _shape_run; }
   TensorShapeMap &getOutputShapeMap() { return _output_shape; }
   /// @brief Return true if "--shape_run" or "--shape_prepare" is provided
   bool shapeParamProvided();
-  const int getVerboseLevel(void) const { return _verbose_level; }
+  int32_t getVerboseLevel(void) const { return _verbose_level; }
   const std::string &getQuantize(void) const { return _quantize; }
   const std::string &getQuantizedModelPath(void) const { return _quantized_model_path; }
   const std::string &getCodegen(void) const { return _codegen; }
@@ -98,18 +98,18 @@ class Args
   TensorShapeMap _shape_prepare;
   TensorShapeMap _shape_run;
   TensorShapeMap _output_shape;
-  int _num_runs;
+  int32_t _num_runs;
   bool _fixed_input = false;
   bool _force_float = false;
-  int _warmup_runs;
-  int _minmax_runs;
-  int _run_delay;
+  int32_t _warmup_runs;
+  int32_t _minmax_runs;
+  int32_t _run_delay;
   std::unordered_map<uint32_t, uint32_t> _output_sizes;
   bool _gpumem_poll;
   bool _mem_poll;
   bool _write_report;
   bool _print_version = false;
-  int _verbose_level;
+  int32_t _verbose_level;
   bool _use_single_model = false;
   std::string _quantize;
   std::string _quantized_model_path;
diff --git a/tests/tools/onert_run/src/h5formatter.cc b/tests/tools/onert_run/src/h5formatter.cc
index bef7e44dec1..9265331272b 100644
--- a/tests/tools/onert_run/src/h5formatter.cc
+++ b/tests/tools/onert_run/src/h5formatter.cc
@@ -96,7 +96,6 @@ void H5Formatter::loadInputs(const std::string &filename, std::vector<Allocation> &inputs)
diff --git a/tests/tools/onert_run/src/nnfw_util.cc b/tests/tools/onert_run/src/nnfw_util.cc
--- a/tests/tools/onert_run/src/nnfw_util.cc
+++ b/tests/tools/onert_run/src/nnfw_util.cc
@@ -23,7 +23,7 @@ namespace onert_run
 uint64_t num_elems(const nnfw_tensorinfo *ti)
 {
   uint64_t n = 1;
-  for (uint32_t i = 0; i < ti->rank; ++i)
+  for (int32_t i = 0; i < ti->rank; ++i)
   {
     assert(ti->dims[i] >= 0);
     n *= ti->dims[i];
@@ -33,7 +33,7 @@ uint64_t num_elems(const nnfw_tensorinfo *ti)
 
 uint64_t bufsize_for(const nnfw_tensorinfo *ti)
 {
-  static int elmsize[] = {
+  static uint32_t elmsize[] = {
     sizeof(float),   /* NNFW_TYPE_TENSOR_FLOAT32 */
     sizeof(int),     /* NNFW_TYPE_TENSOR_INT32 */
     sizeof(uint8_t), /* NNFW_TYPE_TENSOR_QUANT8_ASYMM */
diff --git a/tests/tools/onert_run/src/onert_run.cc b/tests/tools/onert_run/src/onert_run.cc
index 24bf991215e..d9ab14d3858 100644
--- a/tests/tools/onert_run/src/onert_run.cc
+++ b/tests/tools/onert_run/src/onert_run.cc
@@ -68,9 +68,9 @@ std::string genQuantizedModelPathFromModelPath(const std::string &model_path,
       return model_path.substr(0, extension_pos) + "_quantized_q8wo.circle";
     case NNFW_QUANTIZE_TYPE_WO_I16_SYM:
       return model_path.substr(0, extension_pos) + "_quantized_q16wo.circle";
+    default:
+      throw std::runtime_error{"Invalid quantization type"};
   }
-
-  throw std::runtime_error{"Invalid quantization type"};
 }
 
 std::string genQuantizedModelPathFromPackagePath(const std::string &package_path,
@@ -95,9 +95,9 @@ std::string genQuantizedModelPathFromPackagePath(const std::string &package_path,
       return package_path_without_slash + "/" + package_name + "_quantized_q8wo.circle";
     case NNFW_QUANTIZE_TYPE_WO_I16_SYM:
       return package_path_without_slash + "/" + package_name + "_quantized_q16wo.circle";
+    default:
+      throw std::runtime_error{"Invalid quantization type"};
   }
-
-  throw std::runtime_error{"Invalid quantization type"};
 }
 
 int main(const int argc, char **argv)
@@ -121,7 +121,7 @@ int main(const int argc, char **argv)
 #endif
 
   // TODO Apply verbose level to phases
-  const int verbose = args.getVerboseLevel();
+  const auto verbose = args.getVerboseLevel();
 
   benchmark::Phases phases(
     benchmark::PhaseOption{args.getMemoryPoll(), args.getGpuMemoryPoll(), args.getRunDelay()});
@@ -188,7 +188,7 @@ int main(const int argc, char **argv)
       auto random_generator = RandomGenerator();
       nnfw_set_execute_config(session, NNFW_RUN_CONFIG_DUMP_MINMAX, nullptr);
 
-      for (uint32_t i = 0; i < args.getMinmaxRuns(); i++)
+      for (int32_t i = 0; i < args.getMinmaxRuns(); i++)
       {
         random_generator.generate(inputs);
         NNPR_ENSURE_STATUS(nnfw_run(session));
@@ -266,7 +266,7 @@ int main(const int argc, char **argv)
      {
        auto &shape = found->second;
        bool set_input = false;
-       if (ti.rank != shape.size())
+       if (ti.rank != static_cast<int32_t>(shape.size()))
        {
          set_input = true;
        }
@@ -285,7 +285,7 @@ int main(const int argc, char **argv)
      if (set_input)
      {
        ti.rank = shape.size();
-       for (int i = 0; i < ti.rank; i++)
+       for (int32_t i = 0; i < ti.rank; i++)
          ti.dims[i] = shape.at(i);
        NNPR_ENSURE_STATUS(nnfw_set_input_tensorinfo(session, i, &ti));
      }
@@ -453,7 +453,7 @@ int main(const int argc, char **argv)
        else
        {
          TensorShape shape;
-         for (uint32_t j = 0; j < ti.rank; j++)
+         for (int32_t j = 0; j < ti.rank; j++)
           shape.emplace_back(ti.dims[j]);
 
          output_shapes.emplace_back(shape);
diff --git a/tests/tools/onert_run/src/rawformatter.cc b/tests/tools/onert_run/src/rawformatter.cc
index 8dda6e00940..f2b085618d7 100644
--- a/tests/tools/onert_run/src/rawformatter.cc
+++ b/tests/tools/onert_run/src/rawformatter.cc
@@ -19,12 +19,13 @@
 #include "nnfw_util.h"
 
 #include <iostream>
+#include <filesystem>
 #include <fstream>
 #include <stdexcept>
 
 namespace onert_run
 {
-void RawFormatter::loadInputs(const std::string &filename, std::vector<Allocation> &inputs)
+void RawFormatter::loadInputs(const std::string &prefix, std::vector<Allocation> &inputs)
 {
   uint32_t num_inputs = inputs.size();
 
@@ -39,16 +40,16 @@ void RawFormatter::loadInputs(const std::string &filename, std::vector<Allocation> &inputs)
       file.read(reinterpret_cast<char *>(inputs[i].data()), filesz);
       file.close();
     }
@@ -60,19 +61,20 @@ void RawFormatter::loadInputs(const std::string &filename, std::vector<Allocation> &inputs)
   }
 }
 
-void RawFormatter::dumpOutputs(const std::string &filename, const std::vector<Allocation> &outputs)
+void RawFormatter::dumpOutputs(const std::string &prefix, const std::vector<Allocation> &outputs)
 {
   uint32_t num_outputs = outputs.size();
   try
   {
     for (uint32_t i = 0; i < num_outputs; i++)
     {
-      auto bufsz = outputs[i].size();
+      const auto bufsz = outputs[i].size();
+      const auto filename = prefix + "." + std::to_string(i);
 
-      std::ofstream file(filename + "." + std::to_string(i), std::ios::out | std::ios::binary);
+      std::ofstream file(filename, std::ios::out | std::ios::binary);
       file.write(reinterpret_cast<const char *>(outputs[i].data()), bufsz);
       file.close();
-      std::cerr << filename + "." + std::to_string(i) + " is generated.\n";
+      std::cout << filename + " is generated.\n";
     }
   }
   catch (const std::runtime_error &e)
@@ -82,19 +84,20 @@ void RawFormatter::dumpOutputs(const std::string &filename, const std::vector<Allocation> &outputs)
   }
 }
 
-void RawFormatter::dumpInputs(const std::string &filename, const std::vector<Allocation> &inputs)
+void RawFormatter::dumpInputs(const std::string &prefix, const std::vector<Allocation> &inputs)
 {
   uint32_t num_inputs = inputs.size();
   try
   {
     for (uint32_t i = 0; i < num_inputs; i++)
    {
-      auto bufsz = inputs[i].size();
+      const auto bufsz = inputs[i].size();
+      const auto filename = prefix + "." + std::to_string(i);
 
-      std::ofstream file(filename + "." + std::to_string(i), std::ios::out | std::ios::binary);
+      std::ofstream file(filename, std::ios::out | std::ios::binary);
       file.write(reinterpret_cast<const char *>(inputs[i].data()), bufsz);
       file.close();
-      std::cerr << filename + "." + std::to_string(i) + " is generated.\n";
+      std::cout << filename + " is generated.\n";
     }
   }
   catch (const std::runtime_error &e)
diff --git a/tests/tools/onert_run/src/rawformatter.h b/tests/tools/onert_run/src/rawformatter.h
index 79741e9defc..a34c0ce0125 100644
--- a/tests/tools/onert_run/src/rawformatter.h
+++ b/tests/tools/onert_run/src/rawformatter.h
@@ -31,9 +31,9 @@ class RawFormatter
 {
 public:
   RawFormatter() = default;
-  void loadInputs(const std::string &filename, std::vector<Allocation> &inputs);
-  void dumpOutputs(const std::string &filename, const std::vector<Allocation> &outputs);
-  void dumpInputs(const std::string &filename, const std::vector<Allocation> &inputs);
+  void loadInputs(const std::string &prefix, std::vector<Allocation> &inputs);
+  void dumpOutputs(const std::string &prefix, const std::vector<Allocation> &outputs);
+  void dumpInputs(const std::string &prefix, const std::vector<Allocation> &inputs);
 };
 
 } // namespace onert_run
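
Note (not part of the patch itself): the only CMake change above is linking onert_run against nnfw_common, which appears to be what enables the strict warning set for this target; the exact flags are defined elsewhere in the repository, so the sketch below only assumes something along the lines of -Wall -Wextra -Werror. Under such flags, the typical diagnostics the C++ edits above address are the signed/unsigned comparison between an unsigned loop index and the int32_t rank of nnfw_tensorinfo, and the ignored const qualifier on by-value return types. A minimal illustrative sketch; the names Info, count, runs and the file name sketch.cc are made up for this example:

  // sketch.cc -- illustrative only; e.g. g++ -std=c++17 -Wall -Wextra -Werror -c sketch.cc
  #include <cstdint>

  struct Info
  {
    int32_t rank;    // signed rank, mirroring nnfw_tensorinfo
    int32_t dims[6];
  };

  int64_t count(const Info &ti)
  {
    int64_t n = 1;
    // for (uint32_t i = 0; i < ti.rank; ++i)  // -Wsign-compare, an error under -Werror
    for (int32_t i = 0; i < ti.rank; ++i)      // signed index matches the signed rank
      n *= ti.dims[i];
    return n;
  }

  // const int32_t runs();  // -Wignored-qualifiers (-Wextra): const on a value return is meaningless
  int32_t runs();

This mirrors the int to int32_t loop-index changes in nnfw_util.cc and onert_run.cc and the removal of const from value-returning getters in args.h.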