diff --git a/onert-micro/luci-interpreter/include/luci_interpreter/test_models/prelu/FloatPReluKernel.h b/onert-micro/luci-interpreter/include/luci_interpreter/test_models/prelu/FloatPReluKernel.h new file mode 100644 index 00000000000..01a256f013b --- /dev/null +++ b/onert-micro/luci-interpreter/include/luci_interpreter/test_models/prelu/FloatPReluKernel.h @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef LUCI_INTERPRETER_TEST_MODELS_PRELU_KERNEL_FLOAT_H +#define LUCI_INTERPRETER_TEST_MODELS_PRELU_KERNEL_FLOAT_H + +#include "TestDataPReluBase.h" + +namespace luci_interpreter +{ +namespace test_kernel +{ +namespace prelu_float +{ + +/* + * PRelu Kernel: + * + * Input_1(2, 5) Input_2(2, 1) + * \ / + * PRelu(with broadcast) + * | + * Output(2, 5) + */ +const unsigned char test_kernel_model_circle[] = { + 0x18, 0x00, 0x00, 0x00, 0x43, 0x49, 0x52, 0x30, 0x00, 0x00, 0x0e, 0x00, 0x14, 0x00, 0x00, 0x00, + 0x0c, 0x00, 0x08, 0x00, 0x10, 0x00, 0x04, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, + 0x34, 0x00, 0x00, 0x00, 0x58, 0x01, 0x00, 0x00, 0x74, 0x01, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, + 0x20, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, + 0xf4, 0xff, 0xff, 0xff, 0xf8, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0x04, 0x00, 0x04, 0x00, + 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, + 0x18, 0x00, 0x14, 0x00, 0x10, 0x00, 0x0c, 0x00, 0x08, 0x00, 0x04, 0x00, 0x0e, 0x00, 0x00, 0x00, + 0x14, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x50, 0x00, 0x00, 0x00, + 0x58, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6d, 0x61, 0x69, 0x6e, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x0c, 0x00, 0x00, 0x00, + 0x08, 0x00, 0x04, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00, 0x00, + 0x34, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xa8, 0xff, 0xff, 0xff, 0x0c, 0x00, 0x00, 0x00, + 0x03, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x6f, 0x66, 0x6d, 0x00, + 0x04, 0x00, 
0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, + 0x03, 0x00, 0x00, 0x00, 0xd4, 0xff, 0xff, 0xff, 0x0c, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, + 0x10, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x69, 0x66, 0x6d, 0x32, 0x00, 0x00, 0x00, 0x00, + 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, + 0x0c, 0x00, 0x10, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x08, 0x00, 0x04, 0x00, 0x0c, 0x00, 0x00, 0x00, + 0x0c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, + 0x69, 0x66, 0x6d, 0x31, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x10, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x0c, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, + 0x0c, 0x00, 0x00, 0x00, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x11, 0x00, 0x00, 0x00, + 0x4f, 0x4e, 0x45, 0x2d, 0x74, 0x66, 0x6c, 0x69, 0x74, 0x65, 0x32, 0x63, 0x69, 0x72, 0x63, 0x6c, + 0x65, 0x00, 0x00, 0x00}; + +const std::vector input1_data = { + 8.028845, -2.256134, 5.871517, 3.6350298, -1.1245594, -8.281773, 2.9255466, -5.459926, + 13.205927, 2.6914148, 7.8167467, -3.0257807, -3.5172105, 12.896618, -4.804066, -1.2078629, + -3.355052, 2.079568, 11.507849, -4.21969, 5.263505, 5.169158, 8.4644985, 6.6823874, + -12.781622, -2.936924, 12.260782, 0.6924944, 13.423675, 8.054682, -7.9333363, 7.8090687, + -17.695854, -3.7374554, 6.134268, 4.91667, -18.230255, -3.6263142, -10.009817, -4.900553, + -2.6339025, 10.807385, 8.37227, -2.2637882, -3.136509, 1.1572075, -5.7503166, 6.0806875}; +const std::vector input2_data = {-1.646285, 2.2777863, 8.24495}; +const std::vector reference_output_data = { + 8.028845, -5.138991, 5.871517, 3.6350298, -2.561506, -68.28281, 2.9255466, -12.436544, + 13.205927, 2.6914148, 7.8167467, -24.94741, 5.790331, 12.896618, -39.609287, 1.9884865, + -7.6420913, 2.079568, 11.507849, 
-9.611551, 5.263505, 5.169158, 8.4644985, 6.6823874, + 21.042192, -6.689685, 12.260782, 0.6924944, 13.423675, 8.054682, 13.060533, 7.8090687, + -145.90144, 6.152917, 6.134268, 4.91667, 30.012197, -8.259969, -82.53044, 8.067708, + -5.999467, 10.807385, 8.37227, -5.1564255, -25.860361, 1.1572075, -13.097992, 6.0806875}; + +} // namespace prelu_float + +class TestDataFloatPRelu : public TestDataPReluBase +{ +public: + explicit TestDataFloatPRelu() : TestDataPReluBase() + { + _input1_data = prelu_float::input1_data; + _input2_data = prelu_float::input2_data; + _reference_output_data = prelu_float::reference_output_data; + _test_kernel_model_circle = prelu_float::test_kernel_model_circle; + } + + ~TestDataFloatPRelu() override = default; +}; + +} // namespace test_kernel +} // namespace luci_interpreter + +#endif // LUCI_INTERPRETER_TEST_MODELS_PRELU_KERNEL_FLOAT_H diff --git a/onert-micro/luci-interpreter/include/luci_interpreter/test_models/prelu/NegPReluKernel.h b/onert-micro/luci-interpreter/include/luci_interpreter/test_models/prelu/NegPReluKernel.h new file mode 100644 index 00000000000..99e36a0bb8c --- /dev/null +++ b/onert-micro/luci-interpreter/include/luci_interpreter/test_models/prelu/NegPReluKernel.h @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef LUCI_INTERPRETER_TEST_MODELS_NEG_PRELU_KERNEL_H +#define LUCI_INTERPRETER_TEST_MODELS_NEG_PRELU_KERNEL_H + +#include "luci_interpreter/test_models/TestDataBase.h" + +namespace luci_interpreter +{ +namespace test_kernel +{ +namespace neg_inputs_type_mismatch_prelu_kernel +{ +/* + * PRelu Kernel with inputs type mismatch: + * + * Input_1(1, 4, 4, 3)-Int Input_2(1, 1, 3)-Float + * \ / + * PRelu + * | + * Output(1, 4, 4, 3)-Float + */ +const unsigned char test_kernel_model_circle[] = { + 0x18, 0x00, 0x00, 0x00, 0x43, 0x49, 0x52, 0x30, 0x00, 0x00, 0x0e, 0x00, 0x14, 0x00, 0x00, 0x00, + 0x0c, 0x00, 0x08, 0x00, 0x10, 0x00, 0x04, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, + 0x34, 0x00, 0x00, 0x00, 0x68, 0x01, 0x00, 0x00, 0x84, 0x01, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, + 0x20, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, + 0xf4, 0xff, 0xff, 0xff, 0xf8, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0x04, 0x00, 0x04, 0x00, + 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, + 0x18, 0x00, 0x14, 0x00, 0x10, 0x00, 0x0c, 0x00, 0x08, 0x00, 0x04, 0x00, 0x0e, 0x00, 0x00, 0x00, + 0x14, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x50, 0x00, 0x00, 0x00, + 0x58, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6d, 0x61, 0x69, 0x6e, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x0c, 0x00, 0x00, 0x00, + 0x08, 0x00, 0x04, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, + 0x40, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x98, 0xff, 0xff, 0xff, 0x0c, 0x00, 0x00, 0x00, + 0x03, 0x00, 0x00, 0x00, 
0x0c, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x6f, 0x66, 0x6d, 0x00, + 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, + 0x03, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x14, 0x00, 0x10, 0x00, 0x0f, 0x00, 0x08, 0x00, 0x04, 0x00, + 0x0c, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x10, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x69, 0x66, 0x6d, 0x32, 0x00, 0x00, 0x00, 0x00, + 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, + 0x0c, 0x00, 0x10, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x08, 0x00, 0x04, 0x00, 0x0c, 0x00, 0x00, 0x00, + 0x0c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, + 0x69, 0x66, 0x6d, 0x31, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x10, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x0c, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, + 0x0c, 0x00, 0x00, 0x00, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x11, 0x00, 0x00, 0x00, + 0x4f, 0x4e, 0x45, 0x2d, 0x74, 0x66, 0x6c, 0x69, 0x74, 0x65, 0x32, 0x63, 0x69, 0x72, 0x63, 0x6c, + 0x65, 0x00, 0x00, 0x00}; +} // namespace neg_inputs_type_mismatch_prelu_kernel + +namespace neg_input_output_type_mismatch_prelu_kernel +{ +/* + * PRelu Kernel with input output types mismatch: + * + * Input_1(1, 4, 4, 3)-Float Input_2(1, 1, 3)-Float + * \ / + * PRelu(no broadcast) + * | + * Output(1, 4, 4, 3)-Int + */ +const unsigned char test_kernel_model_circle[] = { + 0x18, 0x00, 0x00, 0x00, 0x43, 0x49, 0x52, 0x30, 0x00, 0x00, 0x0e, 0x00, 0x14, 0x00, 0x00, 0x00, + 0x0c, 0x00, 0x08, 0x00, 0x10, 0x00, 0x04, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, + 0x34, 0x00, 0x00, 0x00, 0x68, 0x01, 0x00, 0x00, 0x84, 0x01, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, + 0x20, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x04, 
0x00, 0x00, 0x00, + 0xf4, 0xff, 0xff, 0xff, 0xf8, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0x04, 0x00, 0x04, 0x00, + 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, + 0x18, 0x00, 0x14, 0x00, 0x10, 0x00, 0x0c, 0x00, 0x08, 0x00, 0x04, 0x00, 0x0e, 0x00, 0x00, 0x00, + 0x14, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x50, 0x00, 0x00, 0x00, + 0x58, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6d, 0x61, 0x69, 0x6e, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x0c, 0x00, 0x00, 0x00, + 0x08, 0x00, 0x04, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, + 0x44, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x14, 0x00, 0x10, 0x00, 0x0f, 0x00, + 0x08, 0x00, 0x04, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x02, 0x0c, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x6f, 0x66, 0x6d, 0x00, + 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, + 0x03, 0x00, 0x00, 0x00, 0xd4, 0xff, 0xff, 0xff, 0x0c, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, + 0x10, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x69, 0x66, 0x6d, 0x32, 0x00, 0x00, 0x00, 0x00, + 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, + 0x0c, 0x00, 0x10, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x08, 0x00, 0x04, 0x00, 0x0c, 0x00, 0x00, 0x00, + 0x0c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, + 0x69, 0x66, 0x6d, 0x31, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x04, 0x00, 0x00, 
0x00, 0x04, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x10, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x0c, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, + 0x0c, 0x00, 0x00, 0x00, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x11, 0x00, 0x00, 0x00, + 0x4f, 0x4e, 0x45, 0x2d, 0x74, 0x66, 0x6c, 0x69, 0x74, 0x65, 0x32, 0x63, 0x69, 0x72, 0x63, 0x6c, + 0x65, 0x00, 0x00, 0x00}; +} // namespace neg_input_output_type_mismatch_prelu_kernel + +class NegTestDataInputsTypeMismatchPReluKernel : public NegTestDataBase +{ +public: + NegTestDataInputsTypeMismatchPReluKernel() + { + _test_kernel_model_circle = neg_inputs_type_mismatch_prelu_kernel::test_kernel_model_circle; + } + + ~NegTestDataInputsTypeMismatchPReluKernel() override = default; + + const unsigned char *get_model_ptr() override final { return _test_kernel_model_circle; } + +protected: + const unsigned char *_test_kernel_model_circle; +}; + +class NegTestDataInputOutputTypeMismatchPReluKernel : public NegTestDataBase +{ +public: + NegTestDataInputOutputTypeMismatchPReluKernel() + { + _test_kernel_model_circle = + neg_input_output_type_mismatch_prelu_kernel::test_kernel_model_circle; + } + + ~NegTestDataInputOutputTypeMismatchPReluKernel() override = default; + + const unsigned char *get_model_ptr() override final { return _test_kernel_model_circle; } + +protected: + const unsigned char *_test_kernel_model_circle; +}; + +} // namespace test_kernel +} // namespace luci_interpreter + +#endif // LUCI_INTERPRETER_TEST_MODELS_NEG_PRELU_KERNEL_H diff --git a/onert-micro/luci-interpreter/include/luci_interpreter/test_models/prelu/TestDataPReluBase.h b/onert-micro/luci-interpreter/include/luci_interpreter/test_models/prelu/TestDataPReluBase.h new file mode 100644 index 00000000000..8ce4bb8df39 --- /dev/null +++ b/onert-micro/luci-interpreter/include/luci_interpreter/test_models/prelu/TestDataPReluBase.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2023 Samsung Electronics Co., Ltd. 
All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef LUCI_INTERPRETER_TEST_MODELS_PRELU_KERNEL_BASE_H +#define LUCI_INTERPRETER_TEST_MODELS_PRELU_KERNEL_BASE_H + +#include "luci_interpreter/test_models/TestDataBase.h" + +namespace luci_interpreter +{ +namespace test_kernel +{ + +template <typename T> class TestDataPReluBase : public TestDataBase<T> +{ +public: + TestDataPReluBase() = default; + + const unsigned char *get_model_ptr() override final { return _test_kernel_model_circle; } + + const std::vector<T> &get_input_data_by_index(int i) override final + { + switch (i) + { + case 0: + return _input1_data; + case 1: + return _input2_data; + default: + assert(false && "Wrong input index"); + } + } + + const std::vector<T> &get_output_data_by_index(int i) override final + { + assert(i == 0); + return _reference_output_data; + } + +protected: + std::vector<T> _input1_data; + std::vector<T> _input2_data; + std::vector<T> _reference_output_data; + const unsigned char *_test_kernel_model_circle; +}; + +} // namespace test_kernel +} // namespace luci_interpreter + +#endif // LUCI_INTERPRETER_TEST_MODELS_PRELU_KERNEL_BASE_H diff --git a/onert-micro/luci-interpreter/pal/common/PALPreluCommon.h b/onert-micro/luci-interpreter/pal/common/PALPreluCommon.h new file mode 100644 index 00000000000..b4ee7f1b907 --- /dev/null +++ b/onert-micro/luci-interpreter/pal/common/PALPreluCommon.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2023 Samsung Electronics Co., Ltd. 
All Rights Reserved + * Copyright 2020 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef LUCI_INTERPRETER_PAL_PRELU_COMMON_H +#define LUCI_INTERPRETER_PAL_PRELU_COMMON_H + +#include "PALUtils.h" +#include "Params.h" +#include "ProcessBroadcastShapes.h" + +namespace luci_interpreter_pal +{ + +inline void BroadcastPrelu4DSlowFloat(const luci_interpreter::RuntimeShape &unextended_input1_shape, + const float *input1_data, + const luci_interpreter::RuntimeShape &unextended_input2_shape, + const float *input2_data, + const luci_interpreter::RuntimeShape &unextended_output_shape, + float *output_data) +{ + const luci_interpreter::RuntimeShape output_shape = + luci_interpreter::RuntimeShape::extendedShape(4, unextended_output_shape); + + NdArrayDesc<4> desc1; + NdArrayDesc<4> desc2; + NdArrayDescsForElementwiseBroadcast(unextended_input1_shape, unextended_input2_shape, &desc1, + &desc2); + + for (int b = 0; b < output_shape.dims(0); ++b) + { + for (int y = 0; y < output_shape.dims(1); ++y) + { + for (int x = 0; x < output_shape.dims(2); ++x) + { + for (int c = 0; c < output_shape.dims(3); ++c) + { + auto out_idx = offset(output_shape.dimsData(), b, y, x, c); + auto in1_idx = subscriptToIndex(desc1, b, y, x, c); + auto in2_idx = subscriptToIndex(desc2, b, y, x, c); + auto in1_val = input1_data[in1_idx]; + auto in2_val = input2_data[in2_idx]; + output_data[out_idx] = in1_val >= 0.0f ? 
in1_val : in1_val * in2_val; + } + } + } + } +} + +} // namespace luci_interpreter_pal + +#endif // LUCI_INTERPRETER_PAL_PRELU_COMMON_H diff --git a/onert-micro/luci-interpreter/pal/common/Params.h b/onert-micro/luci-interpreter/pal/common/Params.h index c3032d61dd7..6ec100031ca 100644 --- a/onert-micro/luci-interpreter/pal/common/Params.h +++ b/onert-micro/luci-interpreter/pal/common/Params.h @@ -118,6 +118,17 @@ struct TransposeParams int32_t perm[5]; }; +struct PreluParams +{ + int32_t input_offset; + int32_t alpha_offset; + int32_t output_offset; + int32_t output_multiplier_1; + int output_shift_1; + int32_t output_multiplier_2; + int output_shift_2; +}; + struct ComparisonParams { // uint8_t inference params. diff --git a/onert-micro/luci-interpreter/pal/mcu/KernelsToBuild.lst b/onert-micro/luci-interpreter/pal/mcu/KernelsToBuild.lst index c8179dea19e..7ba4e947552 100644 --- a/onert-micro/luci-interpreter/pal/mcu/KernelsToBuild.lst +++ b/onert-micro/luci-interpreter/pal/mcu/KernelsToBuild.lst @@ -20,6 +20,7 @@ REGISTER_KERNEL(FILL, Fill) REGISTER_KERNEL(PACK, Pack) REGISTER_KERNEL(PAD, Pad) REGISTER_KERNEL(PADV2, PadV2) +REGISTER_KERNEL(PRELU, PRelu) REGISTER_KERNEL(RESHAPE, Reshape) REGISTER_KERNEL(RELU, Relu) REGISTER_KERNEL(RELU6, Relu6) diff --git a/onert-micro/luci-interpreter/src/kernels/PRelu.cpp b/onert-micro/luci-interpreter/src/kernels/PRelu.cpp index 3d64215c5cc..9ea7c70c8cf 100644 --- a/onert-micro/luci-interpreter/src/kernels/PRelu.cpp +++ b/onert-micro/luci-interpreter/src/kernels/PRelu.cpp @@ -14,197 +14,56 @@ * limitations under the License. 
*/ -#include "kernels/PRelu.h" - -#include "kernels/BinaryOpCommon.h" +#include "Builders.h" #include "kernels/Utils.h" +#include "TISOKernel.h" -#include -#include +#include "PALPreluCommon.h" namespace luci_interpreter { -namespace kernels +void configure_kernel_CirclePRelu(const circle::Operator *cur_op, BaseRuntimeGraph *runtime_graph) { + kernels::TISOKernel kernel(cur_op, runtime_graph); -PRelu::PRelu(const Tensor *input, const Tensor *alpha, Tensor *output) - : Kernel({input, alpha}, {output}) -{ -} + LUCI_INTERPRETER_CHECK(Tensor::element_type(kernel.input1()) == + Tensor::element_type(kernel.output())); + LUCI_INTERPRETER_CHECK(Tensor::element_type(kernel.input1()) == + Tensor::element_type(kernel.input2())); -PRelu::~PRelu() -{ - // Destructor declared to delete vector of alpha quantized data properly + LUCI_INTERPRETER_CHECK(Tensor::num_dims(kernel.input1()) == Tensor::num_dims(kernel.output())); + LUCI_INTERPRETER_CHECK(Tensor::num_elements(kernel.input1()) == + Tensor::num_elements(kernel.output())); } -void PRelu::configure() +void execute_kernel_CirclePRelu(const circle::Operator *cur_op, BaseRuntimeGraph *runtime_graph) { - LUCI_INTERPRETER_CHECK(input()->element_type() == output()->element_type()); - LUCI_INTERPRETER_CHECK(alpha()->element_type() == output()->element_type()); - LUCI_INTERPRETER_CHECK(input()->scales().size() <= 1); - LUCI_INTERPRETER_CHECK(output()->scales().size() <= 1); + kernels::TISOKernel kernel(cur_op, runtime_graph); + kernels::TISOData kernel_data = kernel.readData(); - if (input()->element_type() == DataType::U8) - { - LUCI_INTERPRETER_CHECK(alpha()->scales().size() <= 1); // remove when CWQ kernel arrives - _alpha_multipliers.resize(1); - double alpha_multiplier = input()->scale() * alpha()->scale() / output()->scale(); - quantizeMultiplier(alpha_multiplier, &_alpha_multipliers[0].multiplier, - &_alpha_multipliers[0].shift); - double identity_multiplier = input()->scale() / output()->scale(); - 
quantizeMultiplier(identity_multiplier, &_output_multiplier_identity, &_output_shift_identity); - } - else if (input()->element_type() == DataType::S16) - { - // Common check for correctness of quant params - LUCI_INTERPRETER_CHECK(input()->zero_point() == 0 && output()->zero_point() == 0); - for (size_t channel = 0; channel < alpha()->zero_points().size(); ++channel) - { - LUCI_INTERPRETER_CHECK(alpha()->zero_points()[channel] == 0); - } - // PRelu specific checks for CWQ - LUCI_INTERPRETER_CHECK(alpha()->quantized_dimension() == alpha()->shape().num_dims() - 1); - LUCI_INTERPRETER_CHECK(static_cast(alpha()->scales().size()) == - alpha()->shape().dim(alpha()->quantized_dimension())); - LUCI_INTERPRETER_CHECK(alpha()->shape().num_elements() == - input()->shape().dim(input()->shape().num_dims() - 1)); - - // all dimension of alpha except last one should be size 1 - for (int dim = 0; dim < alpha()->shape().num_dims() - 1; ++dim) - { - LUCI_INTERPRETER_CHECK(alpha()->shape().dim(dim) == 1); - } - - std::vector real_multipliers = - getQuantizedConvolutionMultiplers(input()->scale(), alpha()->scales(), output()->scale()); - - _alpha_multipliers = quantizeMultipliers(real_multipliers); - - double identity_multiplier = input()->scale() / output()->scale(); - quantizeMultiplier(identity_multiplier, &_output_multiplier_identity, &_output_shift_identity); - } - // TODO: enable it only if kernel with dynamic shapes - output()->resize(calculateShapeForBroadcast(input()->shape(), alpha()->shape())); -} - -void PRelu::execute() const -{ - switch (input()->element_type()) + switch (Tensor::element_type(kernel.input1())) { +#ifndef DIS_FLOAT case DataType::FLOAT32: - evalFloat(); - break; - case DataType::U8: - evalQuantized(); - break; - case DataType::S16: - evalQuantizedS16(); - break; - default: - assert(false && "Unsupported type."); - } -} - -void PRelu::evalFloat() const -{ - const auto input_data = getTensorData(input()); - const auto alpha_data = getTensorData(alpha()); - 
const auto size = getTensorShape(input()).FlatSize(); - auto output_data = getTensorData(output()); - - auto PReluFunc = [](float input, float alpha) { return input >= 0.0 ? input : input * alpha; }; - - if (input()->shape() != alpha()->shape()) - { - tflite::reference_ops::BroadcastBinaryFunction4DSlow( - getTensorShape(input()), getTensorData(input()), getTensorShape(alpha()), - getTensorData(alpha()), getTensorShape(output()), getTensorData(output()), - PReluFunc); - } - else - { - for (auto i = decltype(size){0}; i < size; ++i) { - if (input_data[i] >= 0) - output_data[i] = input_data[i]; - else - output_data[i] = input_data[i] * alpha_data[i]; + const float *input_data_float = kernels::getTensorData(kernel_data.input1_data); + const float *alpha_data_float = kernels::getTensorData(kernel_data.input2_data); + float *output_data_float = kernels::getTensorData(kernel_data.output_data); + assert(output_data_float); + assert(input_data_float); + assert(alpha_data_float); + + luci_interpreter_pal::BroadcastPrelu4DSlowFloat( + kernels::getTensorRuntimeShape(kernel.input1(), runtime_graph), input_data_float, + kernels::getTensorRuntimeShape(kernel.input2(), runtime_graph), alpha_data_float, + kernels::getTensorRuntimeShape(kernel.output(), runtime_graph), output_data_float); + break; } +#endif // DIS_FLOAT + default: + assert(false && "Unsupported type"); } } -void PRelu::evalQuantized() const -{ - tflite::PreluParams op_params{}; - - op_params.input_offset = -input()->zero_point(); // Note the '-'. - op_params.alpha_offset = -alpha()->zero_point(); // Note the '-'. 
- op_params.output_offset = output()->zero_point(); - op_params.output_shift_1 = _output_shift_identity; - op_params.output_multiplier_1 = _output_multiplier_identity; - op_params.output_shift_2 = _alpha_multipliers[0].shift; - op_params.output_multiplier_2 = _alpha_multipliers[0].multiplier; - - if (input()->shape() != alpha()->shape()) - { - tflite::reference_ops::BroadcastPrelu4DSlow( - op_params, getTensorShape(input()), getTensorData(input()), getTensorShape(alpha()), - getTensorData(alpha()), getTensorShape(output()), getTensorData(output())); - } - else - { - tflite::reference_ops::Prelu( - op_params, getTensorShape(input()), getTensorData(input()), getTensorShape(alpha()), - getTensorData(alpha()), getTensorShape(output()), getTensorData(output())); - } -} - -static inline int16_t evalElemS16PRelu(int16_t input_val, int16_t alpha_val, - const ChannelQuantMultipliers &identity_mult, - const ChannelQuantMultipliers &alpha_mult) -{ - constexpr int32_t quantized_min = std::numeric_limits::min(); - constexpr int32_t quantized_max = std::numeric_limits::max(); - - const int32_t output_val = - input_val >= 0 - ? 
tflite::MultiplyByQuantizedMultiplier(static_cast(input_val), - identity_mult.multiplier, identity_mult.shift) - : tflite::MultiplyByQuantizedMultiplier(static_cast(input_val * alpha_val), - alpha_mult.multiplier, alpha_mult.shift); - const int32_t clamped_output = std::min(quantized_max, std::max(quantized_min, output_val)); - return clamped_output; -} - -void PRelu::evalQuantizedS16() const -{ - // Note that this kernel assumes alpha is CWQ - tflite::RuntimeShape input_shape = getTensorShape(input()); - const int16_t *input_data = input()->data(); - const int16_t *alpha_data = alpha()->data(); - int16_t *output_data = output()->data(); - - const ChannelQuantMultipliers pos_mult{_output_shift_identity, _output_multiplier_identity}; - - const int last_dim = input()->shape().num_dims() - 1; - - int32_t outer_dims_size = 1; - for (int i = 0; i < last_dim; ++i) - outer_dims_size *= input_shape.Dims(i); - int32_t quant_dim_size = input_shape.Dims(last_dim); - - for (int32_t outer_dims = 0; outer_dims < outer_dims_size; ++outer_dims) - for (int32_t quant_channel = 0; quant_channel < quant_dim_size; ++quant_channel) - { - const ChannelQuantMultipliers &neg_mult = _alpha_multipliers[quant_channel]; - size_t offset = static_cast(outer_dims) * static_cast(quant_dim_size); - offset += quant_channel; - - output_data[offset] = - evalElemS16PRelu(input_data[offset], alpha_data[quant_channel], pos_mult, neg_mult); - } -} - -} // namespace kernels } // namespace luci_interpreter diff --git a/onert-micro/luci-interpreter/src/kernels/PRelu.h b/onert-micro/luci-interpreter/src/kernels/PRelu.h deleted file mode 100644 index f7735d418ff..00000000000 --- a/onert-micro/luci-interpreter/src/kernels/PRelu.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LUCI_INTERPRETER_KERNELS_PRELU_H -#define LUCI_INTERPRETER_KERNELS_PRELU_H - -#include "core/Kernel.h" -#include - -namespace luci_interpreter -{ -namespace kernels -{ - -class ChannelQuantMultipliers; - -class PRelu : public Kernel -{ -public: - PRelu(const Tensor *input, const Tensor *alpha, Tensor *output); - - ~PRelu(); - - const Tensor *input() const { return _inputs[0]; } - const Tensor *alpha() const { return _inputs[1]; } - Tensor *output() const { return _outputs[0]; } - - void configure() override; - void execute() const override; - -private: - void evalFloat() const; - void evalQuantized() const; - void evalQuantizedS16() const; - -private: - std::vector _alpha_multipliers; - // TODO merge this into one ChannelQuantMultiplier object - int32_t _output_multiplier_identity = 0; - int _output_shift_identity = 0; -}; - -} // namespace kernels -} // namespace luci_interpreter - -#endif // LUCI_INTERPRETER_KERNELS_PRELU_H diff --git a/onert-micro/luci-interpreter/src/kernels/PRelu.test.cpp b/onert-micro/luci-interpreter/src/kernels/PRelu.test.cpp index 6d97382de5a..533bc817039 100644 --- a/onert-micro/luci-interpreter/src/kernels/PRelu.test.cpp +++ b/onert-micro/luci-interpreter/src/kernels/PRelu.test.cpp @@ -15,383 +15,92 @@ * limitations under the License. 
*/ -#include "kernels/PRelu.h" #include "kernels/TestUtils.h" -#include "luci_interpreter/TestMemoryManager.h" +#include "luci_interpreter/test_models/prelu/FloatPReluKernel.h" +#include "luci_interpreter/test_models/prelu/NegPReluKernel.h" + +#include "loader/ModuleLoader.h" namespace luci_interpreter { -namespace kernels -{ namespace { using namespace testing; -template -void Check(std::initializer_list input_shape, std::initializer_list alpha_shape, - std::initializer_list output_shape, std::initializer_list input_data, - std::initializer_list alpha_data, std::initializer_list output_data) -{ - std::unique_ptr memory_manager = std::make_unique(); - constexpr DataType element_type = getElementType(); - Tensor input_tensor = - makeInputTensor(input_shape, input_data, memory_manager.get()); - Tensor alpha_tensor = - makeInputTensor(alpha_shape, alpha_data, memory_manager.get()); - Tensor output_tensor = makeOutputTensor(element_type); - - PRelu kernel(&input_tensor, &alpha_tensor, &output_tensor); - - kernel.configure(); - memory_manager->allocate_memory(output_tensor); - kernel.execute(); - - EXPECT_THAT(extractTensorData(output_tensor), ::testing::ElementsAreArray(output_data)); - EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shape)); -} - -TEST(PReluTest, FloatSimple) -{ - Check(/*input_shape=*/{2, 3}, /*alpha_shape=*/{2, 3}, - /*output_shape=*/{2, 3}, - /*input_data=*/ - { - 0.0f, 1.0f, 3.0f, // Row 1 - 1.0f, -1.0f, -2.0f, // Row 2 - }, - /*alpha_data=*/ - { - 0.0f, 0.5f, 0.1f, // Row 1 - 0.0f, 0.5f, 0.1f, // Row 2 - }, - /*output_data=*/ - { - 0.0f, 1.0f, 3.0f, // Row 1 - 1.0f, -0.5f, -0.2f, // Row 2 - }); - - SUCCEED(); -} - -TEST(PReluTest, FloatBroadcast) -{ - Check(/*input_shape=*/{1, 2, 2, 3}, /*alpha_shape=*/{1, 1, 3}, - /*output_shape=*/{1, 2, 2, 3}, - /*input_data=*/ - { - 0.0f, 0.0f, 0.0f, // Row 1, Column 1 - 1.0f, 1.0f, 1.0f, // Row 1, Column 2 - -1.0f, -1.0f, -1.0f, // Row 2, Column 1 - -2.0f, -2.0f, -2.0f, // 
Row 2, Column 2 - }, - /*alpha_data=*/ - {0.0f, 1.0f, 2.0f}, - /*output_data=*/ - { - 0.0f, 0.0f, 0.0f, // Row 1, Column 1 - 1.0f, 1.0f, 1.0f, // Row 1, Column 2 - 0.0f, -1.0f, -2.0f, // Row 2, Column 1 - 0.0f, -2.0f, -4.0f, // Row 2, Column 2 - }); - - SUCCEED(); -} - -float GetTolerance(float min, float max) { return (max - min) / 255.0; } - -TEST(PReluTest, Uint8Simple) +class PReluTest : public ::testing::Test { - std::unique_ptr memory_manager = std::make_unique(); - std::vector input_data{-0.8f, 0.2f, 0.9f, 0.7f, 0.1f, -0.4f}; - std::vector alpha_data{0.5f, 0.5f, 0.5f, 0.25f, 1.0f, 0.25f}; - std::vector ref_output_data{-0.4f, 0.2f, 0.9f, 0.7f, 0.1f, -0.1f}; + // Do nothing +}; - float kQuantizedTolerance = GetTolerance(-1.0, 1.0); - std::pair quant_param = quantizationParams(-1.0f, 1.0f); - - Tensor input_tensor = makeInputTensor( - {1, 2, 3, 1}, quant_param.first, quant_param.second, input_data, memory_manager.get()); - Tensor alpha_tensor = makeInputTensor( - {1, 2, 3, 1}, quant_param.first, quant_param.second, alpha_data, memory_manager.get()); - Tensor output_tensor = makeOutputTensor(DataType::U8, quant_param.first, quant_param.second); - - PRelu kernel(&input_tensor, &alpha_tensor, &output_tensor); - kernel.configure(); - memory_manager->allocate_memory(output_tensor); - kernel.execute(); - - EXPECT_THAT(dequantizeTensorData(output_tensor), - FloatArrayNear(ref_output_data, kQuantizedTolerance)); - EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray({1, 2, 3, 1})); - - SUCCEED(); -} - -TEST(PReluTest, Uint8Broadcast) +template std::vector checkPReluKernel(test_kernel::TestDataBase *test_data_base) { - std::vector input_data{ - 0.0f, 0.0f, 0.0f, // Row 1, Column 1 - 0.5f, 0.5f, 0.5f, // Row 1, Column 2 - -1.0f, -1.0f, -1.0f, // Row 2, Column 1 - -0.25f, -0.25f, -0.25f, // Row 2, Column 2 - }; - std::vector alpha_data{0.0f, 0.5f, -0.5f}; - std::vector ref_output_data{ - 0.0f, 0.0f, 0.0f, // Row 1, Column 1 - 0.5f, 0.5f, 0.5f, // Row 
1, Column 2 - 0.0f, -0.5f, 0.5f, // Row 2, Column 1 - 0.0f, -0.125f, 0.125f // Row 2, Column 2 - }; - std::vector ref_quant_output_data{ - 128, 128, 128, // Row 1, Column 1 - 192, 192, 192, // Row 1, Column 2 - 128, 64, 192, // Row 2, Column 1 - 128, 112, 144 // Row 2, Column 2 - }; - float kQuantizedTolerance = 2 * (1. / 256); - const float kMin = -1; - const float kMax = 127.f / 128.f; - std::pair quant_param = quantizationParams(kMin, kMax); + MemoryManager memory_manager{}; + RuntimeModule runtime_module{}; + bool dealloc_input = true; - std::unique_ptr memory_manager = std::make_unique(); - Tensor input_tensor = makeInputTensor( - {1, 2, 2, 3}, quant_param.first, quant_param.second, input_data, memory_manager.get()); - Tensor alpha_tensor = makeInputTensor( - {1, 1, 3}, quant_param.first, quant_param.second, alpha_data, memory_manager.get()); - Tensor output_tensor = makeOutputTensor(DataType::U8, quant_param.first, quant_param.second); + // Load model with single op + auto *model_data_raw = reinterpret_cast(test_data_base->get_model_ptr()); + ModuleLoader::load(&runtime_module, &memory_manager, model_data_raw, dealloc_input); - PRelu kernel(&input_tensor, &alpha_tensor, &output_tensor); - kernel.configure(); - memory_manager->allocate_memory(output_tensor); - kernel.execute(); + auto *main_runtime_graph = runtime_module.getMainGraph(); + assert(main_runtime_graph->getNumOfInputTensors() == 2); - EXPECT_THAT(dequantizeTensorData(output_tensor), - FloatArrayNear(ref_output_data, kQuantizedTolerance)); - EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray({1, 2, 2, 3})); - EXPECT_THAT(extractTensorData(output_tensor), - ::testing::ElementsAreArray(ref_quant_output_data)); -} + // set left input data + { + auto *input_tensor_data = reinterpret_cast(main_runtime_graph->configureGraphInput(0)); + std::copy(test_data_base->get_input_data_by_index(0).begin(), + test_data_base->get_input_data_by_index(0).end(), input_tensor_data); + } 
-TEST(PReluTest, SInt16_LWQ_NEG) -{ - std::unique_ptr memory_manager = std::make_unique(); - // Rewrite this test in case layer-wise quantization for sint16 is supported - std::vector input_data(6); // data is not important - std::vector alpha_data(6); + // set right input data + { + auto *input_tensor_data = reinterpret_cast(main_runtime_graph->configureGraphInput(1)); + std::copy(test_data_base->get_input_data_by_index(1).begin(), + test_data_base->get_input_data_by_index(1).end(), input_tensor_data); + } - Tensor input_tensor = - makeInputTensor({1, 2, 3, 1}, 0.1, 0, input_data, memory_manager.get()); - Tensor alpha_tensor = - makeInputTensor({1, 2, 3, 1}, 0.1, 0, alpha_data, memory_manager.get()); - Tensor output_tensor = makeOutputTensor(DataType::S16, 0.1, 0); + runtime_module.execute(); - PRelu kernel(&input_tensor, &alpha_tensor, &output_tensor); - EXPECT_ANY_THROW(kernel.configure()); -} + assert(main_runtime_graph->getNumOfOutputTensors() == 1); -TEST(PReluTest, SInt16_CWQ_Simple) -{ - std::unique_ptr memory_manager = std::make_unique(); - std::vector input_data{-0.8f, 0.2f, 0.9f, -0.7f, 0.1f, -0.4f}; - std::vector alpha_data{0.5f, 0.25f}; - std::vector ref_output_data{-0.4f, 0.2f, 0.9f, -0.175f, 0.1f, -0.1f}; - - std::vector alpha_scales{0.05f, 0.025f}; - std::vector zerop{0, 0}; - Tensor input_tensor = - makeInputTensor({1, 1, 3, 2}, 0.1, 0, input_data, memory_manager.get()); - Tensor alpha_tensor = - makeInputTensor({2}, alpha_scales, zerop, 0, alpha_data, memory_manager.get()); - Tensor output_tensor = makeOutputTensor(DataType::S16, 0.025, 0); - - PRelu kernel(&input_tensor, &alpha_tensor, &output_tensor); - kernel.configure(); - memory_manager->allocate_memory(output_tensor); - kernel.execute(); - - EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray({1, 1, 3, 2})); - EXPECT_THAT(dequantizeTensorData(output_tensor), FloatArrayNear(ref_output_data)); + T *output_data = 
reinterpret_cast(main_runtime_graph->getOutputDataByIndex(0)); + const size_t num_elements = (main_runtime_graph->getOutputDataSizeByIndex(0) / sizeof(T)); + std::vector output_data_vector(output_data, output_data + num_elements); + return output_data_vector; } -TEST(PReluTest, SInt16_CWQ_spatial_alpha_NEG) +TEST_F(PReluTest, Float_P) { - std::unique_ptr memory_manager = std::make_unique(); - std::vector input_data(6); // data is not important - std::vector alpha_data(6); - - std::vector alpha_scales{0.25f, 0.05f}; - std::vector zerop{0, 0}; - Tensor input_tensor = - makeInputTensor({1, 1, 3, 2}, 0.1, 0, input_data, memory_manager.get()); - Tensor alpha_tensor = makeInputTensor({1, 1, 3, 2}, alpha_scales, zerop, 3, - alpha_data, memory_manager.get()); - Tensor output_tensor = makeOutputTensor(DataType::S16, 0.1, 0); - - PRelu kernel(&input_tensor, &alpha_tensor, &output_tensor); - EXPECT_ANY_THROW(kernel.configure()); + test_kernel::TestDataFloatPRelu test_data_float_kernel; + std::vector output_data_vector = checkPReluKernel(&test_data_float_kernel); + EXPECT_THAT(output_data_vector, kernels::testing::FloatArrayNear( + test_data_float_kernel.get_output_data_by_index(0), 0.0001f)); } -TEST(PReluTest, SInt16_CWQ_wrong_dim_quant_NEG) +TEST_F(PReluTest, Inputs_type_mismatch_NEG) { - std::unique_ptr memory_manager = std::make_unique(); - std::vector input_data(6); // data is not important - std::vector alpha_data(6); - - std::vector alpha_scales{0.25f}; - std::vector zerop{0}; - Tensor input_tensor = - makeInputTensor({1, 1, 3, 2}, 0.1, 0, input_data, memory_manager.get()); - Tensor alpha_tensor = makeInputTensor({1, 1, 1, 2}, alpha_scales, zerop, 1, - alpha_data, memory_manager.get()); - Tensor output_tensor = makeOutputTensor(DataType::S16, 0.1, 0); - - PRelu kernel(&input_tensor, &alpha_tensor, &output_tensor); - EXPECT_ANY_THROW(kernel.configure()); + test_kernel::NegTestDataInputsTypeMismatchPReluKernel test_data_kernel; + MemoryManager memory_manager{}; + 
RuntimeModule runtime_module{}; + bool dealloc_input = true; + // Load model with single op + auto *model_data_raw = reinterpret_cast(test_data_kernel.get_model_ptr()); + EXPECT_DEATH(ModuleLoader::load(&runtime_module, &memory_manager, model_data_raw, dealloc_input), + ""); } -TEST(PReluTest, SInt16_CWQ_uneven_shape1) +TEST_F(PReluTest, Input_output_type_mismatch_NEG) { - std::unique_ptr memory_manager = std::make_unique(); - std::vector input_data{-0.8f, 0.2f, 0.9f, -0.7f, 0.1f, -0.4f}; - std::vector alpha_data{0.5f, 0.25f}; - std::vector ref_output_data{-0.4f, 0.2f, 0.9f, -0.175f, 0.1f, -0.1f}; - - std::vector alpha_scales{0.05f, 0.025f}; - std::vector zerop{0, 0}; - Tensor input_tensor = - makeInputTensor({1, 1, 3, 2}, 0.1, 0, input_data, memory_manager.get()); - Tensor alpha_tensor = makeInputTensor({1, 1, 2}, alpha_scales, zerop, 2, - alpha_data, memory_manager.get()); - Tensor output_tensor = makeOutputTensor(DataType::S16, 0.025, 0); - - PRelu kernel(&input_tensor, &alpha_tensor, &output_tensor); - kernel.configure(); - memory_manager->allocate_memory(output_tensor); - kernel.execute(); - - EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray({1, 1, 3, 2})); - EXPECT_THAT(dequantizeTensorData(output_tensor), FloatArrayNear(ref_output_data)); -} - -TEST(PReluTest, SInt16_CWQ_uneven_shape2) -{ - std::unique_ptr memory_manager = std::make_unique(); - std::vector input_data{ - 0.0f, 0.0f, 0.0f, // Row 1, Column 1 - 0.5f, 0.5f, 0.5f, // Row 1, Column 2 - -1.0f, -1.0f, -1.0f, // Row 2, Column 1 - -0.25f, -0.25f, -0.25f, // Row 2, Column 2 - }; - std::vector alpha_data{0.0f, 0.5f, -0.5f}; - std::vector ref_output_data{ - 0.0f, 0.0f, 0.0f, // Row 1, Column 1 - 0.5f, 0.5f, 0.5f, // Row 1, Column 2 - 0.0f, -0.5f, 0.5f, // Row 2, Column 1 - 0.0f, -0.125f, 0.125f // Row 2, Column 2 - }; - - std::vector alpha_scales{1.f, 0.05f, 0.1f}; - std::vector zerop{0, 0, 0}; - Tensor input_tensor = - makeInputTensor({1, 2, 2, 3}, 0.01, 0, input_data, 
memory_manager.get()); - Tensor alpha_tensor = makeInputTensor({1, 1, 1, 3}, alpha_scales, zerop, 3, - alpha_data, memory_manager.get()); - Tensor output_tensor = makeOutputTensor(DataType::S16, 0.001, 0); - - PRelu kernel(&input_tensor, &alpha_tensor, &output_tensor); - kernel.configure(); - memory_manager->allocate_memory(output_tensor); - kernel.execute(); - - EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray({1, 2, 2, 3})); - EXPECT_THAT(dequantizeTensorData(output_tensor), FloatArrayNear(ref_output_data)); -} - -TEST(PReluTest, Input_Output_Type_NEG) -{ - std::unique_ptr memory_manager = std::make_unique(); - Tensor input_tensor = makeInputTensor({1}, {1.f}, memory_manager.get()); - Tensor alpha_tensor = makeInputTensor({1}, {1.f}, memory_manager.get()); - Tensor output_tensor = makeOutputTensor(DataType::U8); - - PRelu kernel(&input_tensor, &alpha_tensor, &output_tensor); - EXPECT_ANY_THROW(kernel.configure()); -} - -TEST(PReluTest, Input_Alpha_Type_NEG) -{ - std::unique_ptr memory_manager = std::make_unique(); - Tensor input_tensor = makeInputTensor({1}, {1.f}, memory_manager.get()); - Tensor alpha_tensor = makeInputTensor({1}, {1}, memory_manager.get()); - Tensor output_tensor = makeOutputTensor(DataType::FLOAT32); - - PRelu kernel(&input_tensor, &alpha_tensor, &output_tensor); - EXPECT_ANY_THROW(kernel.configure()); -} - -TEST(PReluTest, Invalid_Input_Type_NEG) -{ - std::unique_ptr memory_manager = std::make_unique(); - Tensor input_tensor = makeInputTensor({1}, {1}, memory_manager.get()); - Tensor alpha_tensor = makeInputTensor({1}, {1}, memory_manager.get()); - Tensor output_tensor = makeOutputTensor(DataType::S64); - - PRelu kernel(&input_tensor, &alpha_tensor, &output_tensor); - kernel.configure(); - memory_manager->allocate_memory(output_tensor); - EXPECT_ANY_THROW(kernel.execute()); -} - -TEST(PReluTest, Input_Output_U8_CWQ_NEG) -{ - std::unique_ptr memory_manager = std::make_unique(); - std::vector scales{1.f, 1.f}; - 
std::vector zerop{0, 0}; - std::vector dummy_data(4, 0.f); - Tensor input_tensor = - makeInputTensor({2, 2}, scales, zerop, 0, dummy_data, memory_manager.get()); - Tensor alpha_tensor = - makeInputTensor({2, 2}, scales, zerop, 0, dummy_data, memory_manager.get()); - Tensor output_tensor = - makeInputTensor({2, 2}, scales, zerop, 0, dummy_data, memory_manager.get()); - - PRelu kernel(&input_tensor, &alpha_tensor, &output_tensor); - EXPECT_ANY_THROW(kernel.configure()); -} - -TEST(PReluTest, Input_Output_S16_CWQ_NEG) -{ - std::unique_ptr memory_manager = std::make_unique(); - std::vector scales{1.f, 1.f}; - std::vector zerop{0, 0}; - std::vector dummy_data(4, 0.f); - Tensor input_tensor = - makeInputTensor({2, 2}, scales, zerop, 0, dummy_data, memory_manager.get()); - Tensor alpha_tensor = - makeInputTensor({2, 2}, scales, zerop, 0, dummy_data, memory_manager.get()); - Tensor output_tensor = - makeInputTensor({2, 2}, scales, zerop, 0, dummy_data, memory_manager.get()); - - PRelu kernel(&input_tensor, &alpha_tensor, &output_tensor); - EXPECT_ANY_THROW(kernel.configure()); -} - -TEST(PReluTest, Mixing_U8_S16_NEG) -{ - std::unique_ptr memory_manager = std::make_unique(); - std::vector dummy_data(4, 0.f); - Tensor input_tensor = - makeInputTensor({2, 2}, 1.f, 0, dummy_data, memory_manager.get()); - Tensor alpha_tensor = - makeInputTensor({2, 2}, 1.f, 0, dummy_data, memory_manager.get()); - Tensor output_tensor = - makeInputTensor({2, 2}, 1.f, 0, dummy_data, memory_manager.get()); - - PRelu kernel(&input_tensor, &alpha_tensor, &output_tensor); - EXPECT_ANY_THROW(kernel.configure()); + test_kernel::NegTestDataInputOutputTypeMismatchPReluKernel test_data_kernel; + MemoryManager memory_manager{}; + RuntimeModule runtime_module{}; + bool dealloc_input = true; + // Load model with single op + auto *model_data_raw = reinterpret_cast(test_data_kernel.get_model_ptr()); + EXPECT_DEATH(ModuleLoader::load(&runtime_module, &memory_manager, model_data_raw, dealloc_input), + ""); } } 
// namespace -} // namespace kernels } // namespace luci_interpreter