diff --git a/src/BUILD b/src/BUILD
index ed36bfce79..71b1724ba4 100644
--- a/src/BUILD
+++ b/src/BUILD
@@ -1948,6 +1948,8 @@ cc_test(
         "test/dummy_tflite/1/dummy.tflite",
         "test/dummyUppercase/1/dummy.xml",
         "test/dummyUppercase/1/dummy.bin",
+        "test/no_name_output/1/model.xml",
+        "test/no_name_output/1/model.bin",
         "test/increment_1x3x4x5/1/increment_1x3x4x5.xml",
         "test/increment_1x3x4x5/1/increment_1x3x4x5.bin",
         "test/mediapipe/config_mediapipe_openai_chat_completions_mock.json",
diff --git a/src/modelinstance.cpp b/src/modelinstance.cpp
index 7497be90bd..51b523f078 100644
--- a/src/modelinstance.cpp
+++ b/src/modelinstance.cpp
@@ -23,6 +23,7 @@
 #include <string>
 #include <thread>
 #include <unordered_map>
+#include <unordered_set>
 #include <utility>
 
 // TODO windows
@@ -305,9 +306,14 @@ static Status applyLayoutConfiguration(const ModelConfig& config, std::shared_pt
     }
 
     OV_LOGGER("ov::Model: {}, model->outputs()", reinterpret_cast<void*>(model.get()));
-    for (const ov::Output<ov::Node>& output : model->outputs()) {
+    size_t outputIndex = 0;
+    for (ov::Output<ov::Node>& output : model->outputs()) {
         try {
             OV_LOGGER("ov::Output<ov::Node> output: {}, output.get_any_name()", reinterpret_cast<void*>(&output));
+            if (output.get_names().size() == 0) {
+                std::unordered_set<std::string> dummy_name{"OUT_" + std::to_string(outputIndex)};
+                output.add_names(dummy_name);
+            }
             std::string name = output.get_any_name();
             std::string mappedName = config.getMappingOutputByKey(name).empty() ? name : config.getMappingOutputByKey(name);
             if (config.getLayouts().count(mappedName) > 0) {
@@ -355,6 +361,7 @@ static Status applyLayoutConfiguration(const ModelConfig& config, std::shared_pt
                 modelVersion);
             return StatusCode::UNKNOWN_ERROR;
         }
+        outputIndex++;
     }
 
     try {
diff --git a/src/test/modelinstance_test.cpp b/src/test/modelinstance_test.cpp
index 1cf5a0c5d1..36518082c9 100644
--- a/src/test/modelinstance_test.cpp
+++ b/src/test/modelinstance_test.cpp
@@ -112,6 +112,18 @@ TEST_F(TestUnloadModel, CanUnloadModelNotHoldingModelInstanceAtPredictPath) {
     EXPECT_TRUE(modelInstance.canUnloadInstance());
 }
 
+TEST_F(TestUnloadModel, NoNameOutput) {
+    ovms::ModelInstance modelInstance("UNUSED_NAME", UNUSED_MODEL_VERSION, *ieCore);
+    ASSERT_EQ(modelInstance.loadModel(NO_NAME_MODEL_CONFIG), ovms::StatusCode::OK);
+    ASSERT_EQ(ovms::ModelVersionState::AVAILABLE, modelInstance.getStatus().getState());
+    EXPECT_EQ(modelInstance.getInputsInfo().count("INPUT1"), 1);
+    EXPECT_EQ(modelInstance.getInputsInfo().count("INPUT2"), 1);
+    EXPECT_EQ(modelInstance.getOutputsInfo().count("OUT_0"), 1);
+    EXPECT_EQ(modelInstance.getOutputsInfo().count("OUT_1"), 1);
+    modelInstance.retireModel();
+    EXPECT_EQ(ovms::ModelVersionState::END, modelInstance.getStatus().getState());
+}
+
 TEST_F(TestUnloadModel, UnloadWaitsUntilMetadataResponseIsBuilt) {
     static std::thread thread;
    static std::shared_ptr<ovms::ModelInstance> instance;
diff --git a/src/test/no_name_output/1/model.bin b/src/test/no_name_output/1/model.bin
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/src/test/no_name_output/1/model.xml b/src/test/no_name_output/1/model.xml
new file mode 100644
index 0000000000..f9f3585906
--- /dev/null
+++ b/src/test/no_name_output/1/model.xml
@@ -0,0 +1,86 @@
+<?xml version="1.0"?>
+<net name="no_name_output" version="11">
+    <layers>
+        <layer id="0" name="INPUT1" type="Parameter" version="opset1">
+            <data shape="1,10" element_type="i8" />
+            <output>
+                <port id="0" precision="I8" names="INPUT1">
+                    <dim>1</dim>
+                    <dim>10</dim>
+                </port>
+            </output>
+        </layer>
+        <layer id="1" name="INPUT2" type="Parameter" version="opset1">
+            <data shape="1,10" element_type="i8" />
+            <output>
+                <port id="0" precision="I8" names="INPUT2">
+                    <dim>1</dim>
+                    <dim>10</dim>
+                </port>
+            </output>
+        </layer>
+        <layer id="2" name="MULTIPLY" type="Multiply" version="opset1">
+            <data auto_broadcast="numpy" />
+            <input>
+                <port id="0" precision="I8">
+                    <dim>1</dim>
+                    <dim>10</dim>
+                </port>
+                <port id="1" precision="I8">
+                    <dim>1</dim>
+                    <dim>10</dim>
+                </port>
+            </input>
+            <output>
+                <port id="2" precision="I8">
+                    <dim>1</dim>
+                    <dim>10</dim>
+                </port>
+            </output>
+        </layer>
+        <layer id="3" name="ADD" type="Add" version="opset1">
+            <data auto_broadcast="numpy" />
+            <input>
+                <port id="0" precision="I8">
+                    <dim>1</dim>
+                    <dim>10</dim>
+                </port>
+                <port id="1" precision="I8">
+                    <dim>1</dim>
+                    <dim>10</dim>
+                </port>
+            </input>
+            <output>
+                <port id="2" precision="I8">
+                    <dim>1</dim>
+                    <dim>10</dim>
+                </port>
+            </output>
+        </layer>
+        <layer id="4" name="Result_0" type="Result" version="opset1">
+            <input>
+                <port id="0" precision="I8">
+                    <dim>1</dim>
+                    <dim>10</dim>
+                </port>
+            </input>
+        </layer>
+        <layer id="5" name="Result_1" type="Result" version="opset1">
+            <input>
+                <port id="0" precision="I8">
+                    <dim>1</dim>
+                    <dim>10</dim>
+                </port>
+            </input>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="0" from-port="0" to-layer="2" to-port="1" />
+        <edge from-layer="0" from-port="0" to-layer="3" to-port="1" />
+        <edge from-layer="1" from-port="0" to-layer="2" to-port="0" />
+        <edge from-layer="1" from-port="0" to-layer="3" to-port="0" />
+        <edge from-layer="2" from-port="2" to-layer="4" to-port="0" />
+        <edge from-layer="3" from-port="2" to-layer="5" to-port="0" />
+    </edges>
+    <rt_info />
+</net>
diff --git a/src/test/test_utils.hpp b/src/test/test_utils.hpp
index b429959fac..4466da6710 100644
--- a/src/test/test_utils.hpp
+++ b/src/test/test_utils.hpp
@@ -77,6 +77,7 @@ const std::string passthrough_string_model_location = getGenericFullPathForSrcTe
 const std::string dummy_saved_model_location = getGenericFullPathForSrcTest(std::filesystem::current_path().u8string() + "/src/test/dummy_saved_model", false);
 const std::string dummy_tflite_location = getGenericFullPathForSrcTest(std::filesystem::current_path().u8string() + "/src/test/dummy_tflite", false);
 const std::string scalar_model_location = getGenericFullPathForSrcTest(std::filesystem::current_path().u8string() + "/src/test/scalar", false);
+const std::string no_name_output_model_location = getGenericFullPathForSrcTest(std::filesystem::current_path().u8string() + "/src/test/no_name_output", false);
 
 const ovms::ModelConfig DUMMY_MODEL_CONFIG{
     "dummy",
@@ -213,6 +214,21 @@ const ovms::ModelConfig SCALAR_MODEL_CONFIG{
     scalar_model_location,  // local path
 };
 
+const ovms::ModelConfig NO_NAME_MODEL_CONFIG{
+    "no_name_output",
+    no_name_output_model_location,  // base path
+    "CPU",                          // target device
+    "1",                            // batchsize
+    1,                              // NIREQ
+    false,                          // is stateful
+    true,                           // idle sequence cleanup enabled
+    false,                          // low latency transformation enabled
+    500,                            // stateful sequence max number
+    "",                             // cache directory
+    1,                              // model_version unused since versions are read from path
+    no_name_output_model_location,  // local path
+};
+
 constexpr const char* DUMMY_MODEL_INPUT_NAME = "b";
 constexpr const char* DUMMY_MODEL_OUTPUT_NAME = "a";
 constexpr const int DUMMY_MODEL_INPUT_SIZE = 10;
diff --git a/tests/models/no_name_output.py b/tests/models/no_name_output.py
new file mode 100644
index 0000000000..6b2877e29e
--- /dev/null
+++ b/tests/models/no_name_output.py
@@ -0,0 +1,56 @@
+#
+# Copyright (c) 2020 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import openvino.runtime as ov
+import numpy as np
+import os
+
+batch_dim = []
+shape = [1, 10]
+dtype = np.int8
+model_name = "no_name_output"
+model_version_dir = model_name
+print(batch_dim + shape)
+in0 = ov.opset1.parameter(shape=batch_dim + shape, dtype=dtype, name="INPUT1")
+in1 = ov.opset1.parameter(shape=batch_dim + shape, dtype=dtype, name="INPUT2")
+op0 = ov.opset1.multiply(in1, in0, name="MULTIPLY")
+op1 = ov.opset1.add(in1, in0, name="ADD")
+
+model = ov.Model([op0, op1], [in0, in1], model_name)
+
+for idx, inp in enumerate(model.inputs):
+    print(f"Input {idx}: {inp.get_names()} {inp.get_shape()} {inp.get_index()}")
+print(model.outputs)
+for idx, out in enumerate(model.outputs):
+    # get_any_name() is skipped on purpose: it raises on outputs without tensor names.
+    print(f"Output {idx}: {out.get_names()} {out.get_shape()} {out.get_index()}")
+
+try:
+    os.makedirs(model_version_dir)
+except OSError:
+    pass  # ignore existing dir
+
+ov.serialize(model, model_version_dir + "/model.xml", model_version_dir + "/model.bin")
+
+# Re-read the serialized IR and compile it to confirm the artifact loads cleanly.
+ov_model = ov.Core().read_model(model_version_dir + "/model.xml")
+compiled_model = ov.Core().compile_model(ov_model, "CPU")
+
+input_data = np.ones((1, 10), dtype=np.int8) * 10
+results = compiled_model({"INPUT1": input_data, "INPUT2": input_data})
+
+print(input_data)
+print(results)
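
To see the condition this patch targets, the generated IR can be inspected directly; a minimal sketch, not part of the patch, assuming an OpenVINO Python environment and a repo-root working directory:

import openvino.runtime as ov

core = ov.Core()
model = core.read_model("src/test/no_name_output/1/model.xml")

for idx, out in enumerate(model.outputs):
    if not out.get_names():
        # Empty tensor-name set: with this change, OVMS exposes the output as "OUT_<index>".
        print(f"output {idx}: no tensor names -> served as OUT_{idx}")
    else:
        print(f"output {idx}: {out.get_names()}")

Both outputs of this model print the fallback branch, which is exactly what the new TestUnloadModel.NoNameOutput test asserts via getOutputsInfo().count("OUT_0") and "OUT_1".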