diff --git a/cmake/packaging/debian.cmake b/cmake/packaging/debian.cmake index 59b312963c180d..c82dca0364b463 100644 --- a/cmake/packaging/debian.cmake +++ b/cmake/packaging/debian.cmake @@ -99,6 +99,7 @@ macro(ov_cpack_settings) 2024.3.0 2024.4.0 2024.5.0 + 2024.6.0 ) ov_check_conflicts_versions(conflicting_versions) diff --git a/cmake/packaging/rpm.cmake b/cmake/packaging/rpm.cmake index a4a63c35858bf9..6e9d535d41cfff 100644 --- a/cmake/packaging/rpm.cmake +++ b/cmake/packaging/rpm.cmake @@ -87,6 +87,7 @@ macro(ov_cpack_settings) 2024.3.0 2024.4.0 2024.5.0 + 2024.6.0 ) ov_check_conflicts_versions(conflicting_versions) diff --git a/docs/articles_en/documentation/openvino-security.rst b/docs/articles_en/documentation/openvino-security.rst index 99cf13161bf243..255dbbd2b62c35 100644 --- a/docs/articles_en/documentation/openvino-security.rst +++ b/docs/articles_en/documentation/openvino-security.rst @@ -55,7 +55,8 @@ Hardware-based protection such as Intel Software Guard Extensions (Intel SGX) ca decryption operation secrets and bind them to a device. For more information, see the `Intel Software Guard Extensions `__. -Use the ``ov::Core::read_model`` to set model representations and weights respectively. +Use the `ov::Core::read_model <../api/c_cpp_api/group__ov__dev__exec__model.html#classov_1_1_core_1ae0576a95f841c3a6f5e46e4802716981>`__ +to set model representations and weights respectively. Currently there is no way to read external weights from memory for ONNX models. The ``ov::Core::read_model(const std::string& model, const Tensor& weights)`` method @@ -65,6 +66,20 @@ should be called with ``weights`` passed as an empty ``ov::Tensor``. :language: cpp :fragment: part1 + +Encrypted models that have already been compiled, in the form of blob files, +can be loaded using the +`ov::Core::import_model <../api/c_cpp_api/group__ov__runtime__cpp__api.html#_CPPv4N2ov4Core12import_modelERNSt7istreamERKNSt6stringERK6AnyMap>`__ +method, as shown in the code sample below: + +.. code-block:: cpp + + ov::Core core; + // Import a model from a blob. 
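+    // Here "blob" is assumed to hold the path to a compiled blob previously produced by export_model.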
+ std::ifstream compiled_blob(blob, std::ios_base::in | std::ios_base::binary); + auto compiled_model = core.import_model(compiled_blob, "CPU"); + + Additional Resources #################### diff --git a/docs/sphinx_setup/assets/versions_raw.js b/docs/sphinx_setup/assets/versions_raw.js index 8045057450bf5f..8b9457135836d5 100644 --- a/docs/sphinx_setup/assets/versions_raw.js +++ b/docs/sphinx_setup/assets/versions_raw.js @@ -1 +1 @@ -var data='[{"version": "2024"}, {"version": "2023.3"}, {"version": "2022.3"}, {"version": "nightly"}, {"version": "archives"}]'; \ No newline at end of file +var data='[{"version": "2024"}, {"version": "2023.3"}, {"version": "nightly"}, {"version": "archives"}]'; diff --git a/src/core/include/openvino/core/except.hpp b/src/core/include/openvino/core/except.hpp index fdb3746d323350..a923cd98c7e576 100644 --- a/src/core/include/openvino/core/except.hpp +++ b/src/core/include/openvino/core/except.hpp @@ -62,6 +62,7 @@ class OPENVINO_API AssertFailure : public Exception { const char* check_string, const std::string& context_info, const std::string& explanation); + virtual ~AssertFailure(); protected: explicit AssertFailure(const std::string& what_arg) : ov::Exception(what_arg) {} @@ -71,6 +72,7 @@ class OPENVINO_API AssertFailure : public Exception { class OPENVINO_API NotImplemented : public AssertFailure { public: [[noreturn]] static void create(const char* file, int line, const std::string& explanation); + virtual ~NotImplemented(); static const std::string default_msg; diff --git a/src/core/src/except.cpp b/src/core/src/except.cpp index 6ce0568e04e387..7cddc5b3ec4a52 100644 --- a/src/core/src/except.cpp +++ b/src/core/src/except.cpp @@ -45,8 +45,12 @@ void ov::AssertFailure::create(const char* file, throw ov::AssertFailure(make_what(file, line, check_string, context_info, explanation)); } +ov::AssertFailure::~AssertFailure() = default; + void ov::NotImplemented::create(const char* file, int line, const std::string& explanation) { throw ov::NotImplemented(make_what(file, line, nullptr, default_msg, explanation)); } +ov::NotImplemented::~NotImplemented() = default; + const std::string ov::NotImplemented::default_msg{"Not Implemented"}; diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp index ed375fd742d7ed..a73c13814d7663 100644 --- a/src/frontends/pytorch/src/op_table.cpp +++ b/src/frontends/pytorch/src/op_table.cpp @@ -867,7 +867,6 @@ const std::unordered_map get_supported_ops_fx() { {"aten.hardtanh.default", op::translate_hardtanh}, {"aten.hardtanh_.default", op::inplace_op}, {"aten.index.Tensor", op::translate_index_fx}, - {"aten._unsafe_index.Tensor", op::translate_index_fx}, {"aten.index_select.default", op::translate_index_select}, {"aten.isfinite.default", op::translate_1to1_match_1_inputs}, {"aten.isinf.default", op::translate_1to1_match_1_inputs}, diff --git a/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp b/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp index 0ed64d49ea68dd..244adb7c40c23c 100644 --- a/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp +++ b/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp @@ -242,26 +242,32 @@ std::vector> get_streams_info_table(const int input_streams, n_threads_per_stream = proc_type_table[0][ALL_PROC]; } } else { - int numa_index = 1; + size_t socket_index = 0; + for (socket_index = 0; socket_index < proc_socket_table.size(); socket_index++) { + if (proc_socket_table[socket_index][PROC_SOCKET_ID] == current_socket_id) { + break; + } + } + const 
std::vector& current_socket_info = proc_socket_table[socket_index]; n_threads_per_stream = model_prefer_threads == 0 - ? proc_type_table[numa_index][ALL_PROC] - : std::min(proc_type_table[numa_index][ALL_PROC], model_prefer_threads); + ? current_socket_info[ALL_PROC] + : std::min(current_socket_info[ALL_PROC], model_prefer_threads); stream_info[THREADS_PER_STREAM] = n_threads_per_stream; - if (proc_type_table[numa_index][ALL_PROC] == proc_type_table[numa_index][MAIN_CORE_PROC]) { + if (current_socket_info[ALL_PROC] == current_socket_info[MAIN_CORE_PROC]) { stream_info[PROC_TYPE] = MAIN_CORE_PROC; - update_streams_per_node(MAIN_CORE_PROC, proc_type_table[numa_index]); - } else if (proc_type_table[numa_index][ALL_PROC] == proc_type_table[numa_index][EFFICIENT_CORE_PROC]) { + update_streams_per_node(MAIN_CORE_PROC, current_socket_info); + } else if (current_socket_info[ALL_PROC] == current_socket_info[EFFICIENT_CORE_PROC]) { stream_info[PROC_TYPE] = EFFICIENT_CORE_PROC; - update_streams_per_node(EFFICIENT_CORE_PROC, proc_type_table[numa_index]); + update_streams_per_node(EFFICIENT_CORE_PROC, current_socket_info); } else { stream_info[PROC_TYPE] = ALL_PROC; - update_mix_stream_info(proc_type_table[numa_index], - {proc_type_table[numa_index]}, + update_mix_stream_info(current_socket_info, + proc_type_table, n_threads_per_stream, IStreamsExecutor::Config::StreamsMode::SUB_STREAMS_NULL, ALL_PROC); } - update_ids_method(proc_type_table[numa_index]); + update_ids_method(current_socket_info); } } else { n_threads = diff --git a/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_eltwise_emitters.cpp b/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_eltwise_emitters.cpp index 4aec56d98873fa..39a2d20c092835 100644 --- a/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_eltwise_emitters.cpp +++ b/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_eltwise_emitters.cpp @@ -1979,6 +1979,88 @@ void jit_relu_emitter::emit_isa(const std::vector &in_vec_idxs, const st h->fmaxnm(dst.s, src.s, tmp.s); } +/// ROUND_HALF_AWAY_FROM_ZERO /// +jit_round_half_away_from_zero_emitter::jit_round_half_away_from_zero_emitter + (dnnl::impl::cpu::aarch64::jit_generator* host, + dnnl::impl::cpu::aarch64::cpu_isa_t host_isa, + const std::shared_ptr& node) + : jit_emitter(host, host_isa, node, get_arithmetic_binary_exec_precision(node)) { +} + +jit_round_half_away_from_zero_emitter::jit_round_half_away_from_zero_emitter + (dnnl::impl::cpu::aarch64::jit_generator* host, + dnnl::impl::cpu::aarch64::cpu_isa_t host_isa, + const ov::element::Type exec_prc) + : jit_emitter(host, host_isa, exec_prc) { +} + +size_t jit_round_half_away_from_zero_emitter::get_inputs_count() const { return 1; } + +std::set> jit_round_half_away_from_zero_emitter::get_supported_precisions(const std::shared_ptr& node) { + return {{element::f32}}; +} + +void jit_round_half_away_from_zero_emitter::emit_impl(const std::vector& in_vec_idxs, const std::vector& out_vec_idxs) const { + if (host_isa_ == dnnl::impl::cpu::aarch64::asimd) { + emit_isa(in_vec_idxs, out_vec_idxs); + } else { + OV_CPU_JIT_EMITTER_THROW("Can't create jit eltwise kernel"); + } +} + +template +void jit_round_half_away_from_zero_emitter::emit_isa(const std::vector &in_vec_idxs, const std::vector &out_vec_idxs) const { + OV_CPU_JIT_EMITTER_ASSERT(exec_prc_ == ov::element::f32, "unsupported precision: " + exec_prc_.to_string()); + + using TReg = typename dnnl::impl::cpu::aarch64::cpu_isa_traits::TReg; + + TReg src = TReg(in_vec_idxs[0]); + TReg dst = TReg(out_vec_idxs[0]); + + 
h->frinta(dst.s, src.s); +} + +/// ROUND_HALF_TO_EVEN /// +jit_round_half_to_even_emitter::jit_round_half_to_even_emitter + (dnnl::impl::cpu::aarch64::jit_generator* host, + dnnl::impl::cpu::aarch64::cpu_isa_t host_isa, + const std::shared_ptr& node) + : jit_emitter(host, host_isa, node, get_arithmetic_binary_exec_precision(node)) { +} + +jit_round_half_to_even_emitter::jit_round_half_to_even_emitter + (dnnl::impl::cpu::aarch64::jit_generator* host, + dnnl::impl::cpu::aarch64::cpu_isa_t host_isa, + const ov::element::Type exec_prc) + : jit_emitter(host, host_isa, exec_prc) { +} + +size_t jit_round_half_to_even_emitter::get_inputs_count() const { return 1; } + +std::set> jit_round_half_to_even_emitter::get_supported_precisions(const std::shared_ptr& node) { + return {{element::f32}}; +} + +void jit_round_half_to_even_emitter::emit_impl(const std::vector& in_vec_idxs, const std::vector& out_vec_idxs) const { + if (host_isa_ == dnnl::impl::cpu::aarch64::asimd) { + emit_isa(in_vec_idxs, out_vec_idxs); + } else { + OV_CPU_JIT_EMITTER_THROW("Can't create jit eltwise kernel"); + } +} + +template +void jit_round_half_to_even_emitter::emit_isa(const std::vector &in_vec_idxs, const std::vector &out_vec_idxs) const { + OV_CPU_JIT_EMITTER_ASSERT(exec_prc_ == ov::element::f32, "unsupported precision: " + exec_prc_.to_string()); + + using TReg = typename dnnl::impl::cpu::aarch64::cpu_isa_traits::TReg; + + TReg src = TReg(in_vec_idxs[0]); + TReg dst = TReg(out_vec_idxs[0]); + + h->frintn(dst.s, src.s); +} + /// SELECT /// jit_select_emitter::jit_select_emitter(dnnl::impl::cpu::aarch64::jit_generator *host, dnnl::impl::cpu::aarch64::cpu_isa_t host_isa, diff --git a/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_eltwise_emitters.hpp b/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_eltwise_emitters.hpp index 2cb7e6928ade3e..2173a1487f1057 100644 --- a/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_eltwise_emitters.hpp +++ b/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_eltwise_emitters.hpp @@ -831,6 +831,48 @@ class jit_relu_emitter : public jit_emitter { void emit_isa(const std::vector &in_vec_idxs, const std::vector &out_vec_idxs) const; }; +class jit_round_half_away_from_zero_emitter : public jit_emitter { +public: + jit_round_half_away_from_zero_emitter(dnnl::impl::cpu::aarch64::jit_generator* host, + dnnl::impl::cpu::aarch64::cpu_isa_t host_isa, + const ov::element::Type exec_prc = ov::element::f32); + + jit_round_half_away_from_zero_emitter(dnnl::impl::cpu::aarch64::jit_generator* host, + dnnl::impl::cpu::aarch64::cpu_isa_t host_isa, + const std::shared_ptr& node); + + size_t get_inputs_count() const override; + + static std::set> get_supported_precisions(const std::shared_ptr& node = nullptr); + +private: + void emit_impl(const std::vector &in_vec_idxs, const std::vector &out_vec_idxs) const override; + + template + void emit_isa(const std::vector &in_vec_idxs, const std::vector &out_vec_idxs) const; +}; + +class jit_round_half_to_even_emitter : public jit_emitter { +public: + jit_round_half_to_even_emitter(dnnl::impl::cpu::aarch64::jit_generator* host, + dnnl::impl::cpu::aarch64::cpu_isa_t host_isa, + const ov::element::Type exec_prc = ov::element::f32); + + jit_round_half_to_even_emitter(dnnl::impl::cpu::aarch64::jit_generator* host, + dnnl::impl::cpu::aarch64::cpu_isa_t host_isa, + const std::shared_ptr& node); + + size_t get_inputs_count() const override; + + static std::set> get_supported_precisions(const std::shared_ptr& node = nullptr); + +private: + void 
emit_impl(const std::vector &in_vec_idxs, const std::vector &out_vec_idxs) const override; + + template + void emit_isa(const std::vector &in_vec_idxs, const std::vector &out_vec_idxs) const; +}; + class jit_select_emitter : public jit_emitter { public: jit_select_emitter(dnnl::impl::cpu::aarch64::jit_generator *host, diff --git a/src/plugins/intel_cpu/src/graph_context.cpp b/src/plugins/intel_cpu/src/graph_context.cpp index e200766fa4791c..5b967ed58a7918 100644 --- a/src/plugins/intel_cpu/src/graph_context.cpp +++ b/src/plugins/intel_cpu/src/graph_context.cpp @@ -27,6 +27,7 @@ GraphContext::GraphContext(const Config& config, numNumaNodes = 1; if (streamExecutor) { cpuStreamExecutor = std::dynamic_pointer_cast(streamExecutor); + numaNodeId = cpuStreamExecutor ? cpuStreamExecutor->get_numa_node_id() : 0; auto nNumaNodes = get_num_numa_nodes(); if (numNumaNodes < nNumaNodes) numNumaNodes = nNumaNodes; diff --git a/src/plugins/intel_cpu/src/graph_context.h b/src/plugins/intel_cpu/src/graph_context.h index db2b126213978c..ce51af0c81b4bd 100644 --- a/src/plugins/intel_cpu/src/graph_context.h +++ b/src/plugins/intel_cpu/src/graph_context.h @@ -44,12 +44,8 @@ class GraphContext { return rtParamsCache; } - DnnlScratchPadPtr getScratchPad(int subStreamID = 0) const { - if (subStreamID < 0) - subStreamID = 0; - if (subStreamID >= numNumaNodes - 1) - subStreamID = numNumaNodes - 1; - return rtScratchPads[subStreamID]; + DnnlScratchPadPtr getScratchPad() const { + return rtScratchPads[numaNodeId]; } const std::vector& getScratchPads() const { @@ -101,6 +97,7 @@ class GraphContext { std::shared_ptr subMemoryManager; int numNumaNodes = 1; + int numaNodeId = 0; std::shared_ptr memoryStatesRegister; std::shared_ptr networkMemoryControl; diff --git a/src/plugins/intel_cpu/src/node.cpp b/src/plugins/intel_cpu/src/node.cpp index f4c2b0eb686df6..de5c53429138c4 100644 --- a/src/plugins/intel_cpu/src/node.cpp +++ b/src/plugins/intel_cpu/src/node.cpp @@ -1106,7 +1106,7 @@ void Node::toNumaNodeImpl(int numaNodeID) { // create scratch pad from specified numa node if (scratchpadMem) { - scratchpadMem = context->getScratchPad(numaNodeID)->createScratchPadMem(scratchpadMem->getDescPtr()); + scratchpadMem = context->getScratchPad()->createScratchPadMem(scratchpadMem->getDescPtr()); primArgs[DNNL_ARG_SCRATCHPAD] = scratchpadMem->getPrimitive(); } diff --git a/src/plugins/intel_cpu/src/node.h b/src/plugins/intel_cpu/src/node.h index 948bd6999ce27a..453b8323fe9e66 100644 --- a/src/plugins/intel_cpu/src/node.h +++ b/src/plugins/intel_cpu/src/node.h @@ -788,7 +788,7 @@ class Node { MemoryPtr getScratchPadMem(const MemoryDescPtr& desc) { if (!scratchpadMem || !scratchpadMem->getDesc().isCompatible(*desc)) { - scratchpadMem = context->getScratchPad(curNumaNode)->createScratchPadMem(desc); + scratchpadMem = context->getScratchPad()->createScratchPadMem(desc); } return scratchpadMem; } diff --git a/src/plugins/intel_cpu/src/nodes/deconv.cpp b/src/plugins/intel_cpu/src/nodes/deconv.cpp index 2ee858e730c900..537f9111b0ceec 100644 --- a/src/plugins/intel_cpu/src/nodes/deconv.cpp +++ b/src/plugins/intel_cpu/src/nodes/deconv.cpp @@ -12,7 +12,7 @@ #include #include #include "cpu/x64/cpu_isa_traits.hpp" -#include "shape_inference/shape_inference_ngraph.hpp" +#include "shape_inference/shape_inference.hpp" #include "eltwise.h" #include "fake_quantize.h" @@ -128,12 +128,11 @@ bool DeconvKey::operator==(const DeconvKey &rhs) const { */ class DeconfolutionShapeInferFactory : public ShapeInferFactory { public: - 
DeconfolutionShapeInferFactory(std::shared_ptr op) : m_op(op) {} + DeconfolutionShapeInferFactory(std::shared_ptr op) : m_op(std::move(op)) {} + ShapeInferPtr makeShapeInfer() const override { - if (m_op->get_input_size() > 2) { - return std::make_shared(make_shape_inference(m_op), PortMask(2)); - } - return std::make_shared(make_shape_inference(m_op), EMPTY_PORT_MASK); + const auto port_mask = (m_op->get_input_size() > 2) ? PortMask(2) : EMPTY_PORT_MASK; + return make_shape_inference(m_op, port_mask); } private: std::shared_ptr m_op; diff --git a/src/plugins/intel_cpu/src/nodes/executors/aarch64/jit_eltwise.cpp b/src/plugins/intel_cpu/src/nodes/executors/aarch64/jit_eltwise.cpp index 5f63904fbb9342..0374888e3d7fcb 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/aarch64/jit_eltwise.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/aarch64/jit_eltwise.cpp @@ -49,6 +49,8 @@ bool JitEltwiseExecutor::isSupported( Algorithm::EltwisePowerStatic, Algorithm::EltwisePrelu, Algorithm::EltwiseRelu, + Algorithm::EltwiseRoundHalfAwayFromZero, + Algorithm::EltwiseRoundHalfToEven, Algorithm::EltwiseSelect, Algorithm::EltwiseSigmoid, Algorithm::EltwiseSoftSign, diff --git a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected.hpp b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected.hpp index 266e78b3d46c77..3266bf8965c37b 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected.hpp @@ -73,7 +73,7 @@ class DnnlFCExecutor : public Executor { return; } const auto newPrimMemDesc = m_primitive->scratchPadDesc(); - m_scratchPadMemory = m_context->getScratchPad(numaNodeID)->createScratchPadMem(newPrimMemDesc); + m_scratchPadMemory = m_context->getScratchPad()->createScratchPadMem(newPrimMemDesc); m_primArgs[DNNL_ARG_SCRATCHPAD] = m_scratchPadMemory->getPrimitive(); if (m_primArgs.count(DNNL_ARG_WEIGHTS)) { @@ -139,7 +139,7 @@ class DnnlFCExecutor : public Executor { if (currentPrimitive && currentPrimitive->scratchPadDesc()->isCompatible(*newPrimMemDesc)) return; - m_scratchPadMemory = m_context->getScratchPad(curNumaNode)->createScratchPadMem(newPrimMemDesc); + m_scratchPadMemory = m_context->getScratchPad()->createScratchPadMem(newPrimMemDesc); m_primArgs[DNNL_ARG_SCRATCHPAD] = m_scratchPadMemory->getPrimitive(); } diff --git a/src/plugins/intel_cpu/src/nodes/executors/executor.hpp b/src/plugins/intel_cpu/src/nodes/executors/executor.hpp index 5b9df5a6e77a55..2016e8f5820dee 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/executor.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/executor.hpp @@ -112,8 +112,10 @@ class ExecutorContext { engine(graphContext->getEngine()), implPriorities(implPriorities), privateWeighCache(std::move(privateWeighCache)), - numNumaNodes(graphContext->getNumNumaNodes()) - {} + numNumaNodes(graphContext->getNumNumaNodes()) { + auto cpuStreamsExecutor = graphContext->getCPUStreamExecutor(); + curNumaNodeId = std::max(0, cpuStreamsExecutor ? 
cpuStreamsExecutor->get_numa_node_id() : curNumaNodeId); + } MultiCachePtr getRuntimeCache() const { auto runtimeCachePtr = runtimeCache.lock(); @@ -121,12 +123,8 @@ class ExecutorContext { return runtimeCachePtr; } - DnnlScratchPadPtr getScratchPad(int subStreamID = 0) const { - if (subStreamID < 0) - subStreamID = 0; - if (subStreamID >= numNumaNodes - 1) - subStreamID = numNumaNodes - 1; - return scratchPads[subStreamID]; + DnnlScratchPadPtr getScratchPad() const { + return scratchPads[curNumaNodeId]; } std::shared_ptr> getPrivateWeighCache() const { @@ -156,6 +154,7 @@ class ExecutorContext { // @todo remove after global cache is used exclusevly std::shared_ptr> privateWeighCache; int numNumaNodes; + int curNumaNodeId = -1; }; class ExecutorFactoryLegacy { diff --git a/src/plugins/intel_cpu/src/nodes/eye.cpp b/src/plugins/intel_cpu/src/nodes/eye.cpp index f1e78b04510914..19e466ad68751a 100644 --- a/src/plugins/intel_cpu/src/nodes/eye.cpp +++ b/src/plugins/intel_cpu/src/nodes/eye.cpp @@ -6,7 +6,7 @@ #include "openvino/op/eye.hpp" #include #include "openvino/core/parallel.hpp" -#include "shape_inference/shape_inference_ngraph.hpp" +#include "shape_inference/shape_inference.hpp" #include "utils/bfloat16.hpp" #define THROW_ERROR(...) OPENVINO_THROW(NameFromType(getType()), " node with name '", getName(), "' ", __VA_ARGS__) @@ -33,13 +33,8 @@ class EyeShapeInferFactory : public ShapeInferFactory { public: EyeShapeInferFactory(std::shared_ptr op) : m_op(op) {} ShapeInferPtr makeShapeInfer() const override { - IShapeInfer::port_mask_t port_mask = EMPTY_PORT_MASK; - if (m_op->get_input_size() == 4) { - port_mask = PortMask(Eye::ROWS_NUM, Eye::COLS_NUM, Eye::DIAGONAL_INDEX, Eye::BATCH_SHAPE); - } else { - port_mask = PortMask(Eye::ROWS_NUM, Eye::COLS_NUM, Eye::DIAGONAL_INDEX); - } - return std::make_shared(make_shape_inference(m_op), port_mask); + return (m_op->get_input_size() == 4) ? 
make_shape_inference(m_op) + : make_shape_inference(m_op, PortMask(Eye::ROWS_NUM, Eye::COLS_NUM)); } private: std::shared_ptr m_op; diff --git a/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp b/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp index 7f6ed99b1173d7..307125ef0069e0 100644 --- a/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp +++ b/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp @@ -134,7 +134,7 @@ void FullyConnected::prepareParams() { void FullyConnected::initTensorParallelSync() { if (tp_cfg.enable_tensor_parallel) { tp_cfg.id = tp_cfg.sub_memory->get_memory_id(tp_cfg.w_rank); - OPENVINO_ASSERT(tp_cfg.id > 0, "Tensor Parallel Config ID cannot be negative."); + OPENVINO_ASSERT(tp_cfg.id >= 0, "Tensor Parallel Config ID cannot be negative."); tp_cfg.sub_memory->set_memory_used(tp_cfg.id, tp_cfg.w_rank); while (true) { std::lock_guard lock(tp_cfg.sub_memory->_flagMutex); diff --git a/src/plugins/intel_cpu/src/nodes/interpolate.cpp b/src/plugins/intel_cpu/src/nodes/interpolate.cpp index 7eed5c1df9789b..37008ee17a9603 100644 --- a/src/plugins/intel_cpu/src/nodes/interpolate.cpp +++ b/src/plugins/intel_cpu/src/nodes/interpolate.cpp @@ -21,7 +21,6 @@ #include "openvino/opsets/opset11.hpp" #include "openvino/opsets/opset4.hpp" #include "shape_inference/shape_inference.hpp" -#include "shape_inference/shape_inference_ngraph.hpp" #include "shape_inference/static_shape.hpp" #include "utils/bfloat16.hpp" #include "utils/cpu_utils.hpp" @@ -1763,19 +1762,14 @@ class InterpolateShapeInferFactory : public ShapeInferFactory { public: InterpolateShapeInferFactory(std::shared_ptr op) : m_op(op) {} ShapeInferPtr makeShapeInfer() const override { - IShapeInfer::port_mask_t port_mask = 0x00; if (auto interp4 = ov::as_type_ptr(m_op)) { const auto &attr = interp4->get_attrs(); - - if (attr.shape_calculation_mode == ngInterpShapeCalcMode::SCALES) { - port_mask = PortMask(Interpolate::SCALES_ID, Interpolate::AXES_ID); - } else if (attr.shape_calculation_mode == ngInterpShapeCalcMode::SIZES) { - port_mask = PortMask(Interpolate::TARGET_SHAPE_ID, Interpolate::AXES_ID); - } else { - OPENVINO_ASSERT(false, "Unsupported interpolate shape calculation mode"); - } + const auto is_supported_mode = (attr.shape_calculation_mode == ngInterpShapeCalcMode::SCALES) || + (attr.shape_calculation_mode == ngInterpShapeCalcMode::SIZES); + OPENVINO_ASSERT(is_supported_mode, "Unsupported interpolate shape calculation mode"); + return make_shape_inference(m_op); } else if (auto interp11 = ov::as_type_ptr(m_op)) { - port_mask = PortMask(Interpolate::SIZE_OR_SCALE_ID_V11, Interpolate::AXES_ID_V11); + return make_shape_inference(m_op); } else { OPENVINO_THROW("Shape infer factory cannot be created for ", m_op->get_type_name(), @@ -1783,7 +1777,6 @@ class InterpolateShapeInferFactory : public ShapeInferFactory { m_op->get_friendly_name(), ", only versions 4 and 11 are supported."); } - return std::make_shared(make_shape_inference(m_op), port_mask); } private: diff --git a/src/plugins/intel_cpu/src/nodes/kernels/aarch64/jit_uni_eltwise_generic.cpp b/src/plugins/intel_cpu/src/nodes/kernels/aarch64/jit_uni_eltwise_generic.cpp index b2c1c52a6912a1..06721a685be610 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/aarch64/jit_uni_eltwise_generic.cpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/aarch64/jit_uni_eltwise_generic.cpp @@ -676,6 +676,8 @@ std::shared_ptr jit_uni_eltwise_generic::create_eltwise_emitte OV_CASE(Algorithm::EltwisePowerStatic, ov::intel_cpu::aarch64::jit_power_static_emitter), 
OV_CASE(Algorithm::EltwisePrelu, ov::intel_cpu::aarch64::jit_prelu_emitter), OV_CASE(Algorithm::EltwiseRelu, ov::intel_cpu::aarch64::jit_relu_emitter), + OV_CASE(Algorithm::EltwiseRoundHalfAwayFromZero, ov::intel_cpu::aarch64::jit_round_half_away_from_zero_emitter), + OV_CASE(Algorithm::EltwiseRoundHalfToEven, ov::intel_cpu::aarch64::jit_round_half_to_even_emitter), OV_CASE(Algorithm::EltwiseSelect, ov::intel_cpu::aarch64::jit_select_emitter), OV_CASE(Algorithm::EltwiseSigmoid, ov::intel_cpu::aarch64::jit_sigmoid_emitter), OV_CASE(Algorithm::EltwiseSoftSign, ov::intel_cpu::aarch64::jit_soft_sign_emitter), @@ -858,6 +860,8 @@ std::set> eltwise_precision_helper::get_supported_pre OV_CASE(Algorithm::EltwiseMultiply, jit_multiply_emitter), OV_CASE(Algorithm::EltwisePrelu, jit_prelu_emitter), OV_CASE(Algorithm::EltwisePowerStatic, jit_power_static_emitter), + OV_CASE(Algorithm::EltwiseRoundHalfAwayFromZero, jit_round_half_away_from_zero_emitter), + OV_CASE(Algorithm::EltwiseRoundHalfToEven, jit_round_half_to_even_emitter), OV_CASE(Algorithm::EltwiseSelect, jit_select_emitter), OV_CASE(Algorithm::EltwiseSigmoid, jit_sigmoid_emitter), OV_CASE(Algorithm::EltwiseSoftSign, jit_soft_sign_emitter), diff --git a/src/plugins/intel_cpu/src/nodes/reference.cpp b/src/plugins/intel_cpu/src/nodes/reference.cpp index 2c83a4ea45259a..5dc7c8818dd52b 100644 --- a/src/plugins/intel_cpu/src/nodes/reference.cpp +++ b/src/plugins/intel_cpu/src/nodes/reference.cpp @@ -4,7 +4,7 @@ #include "reference.h" #include "common/cpu_memcpy.h" -#include "shape_inference/shape_inference_ngraph.hpp" +#include "shape_inference/shape_inference.hpp" namespace ov { namespace intel_cpu { @@ -14,7 +14,7 @@ class ReferenceShapeInferFactory : public ShapeInferFactory { ReferenceShapeInferFactory(std::shared_ptr op) : m_op{std::move(op)} {} ShapeInferPtr makeShapeInfer() const override { - return std::make_shared(make_shape_inference(m_op), FULL_PORT_MASK); + return make_shape_inference(m_op, FULL_PORT_MASK); } private: diff --git a/src/plugins/intel_cpu/src/nodes/rnn.cpp b/src/plugins/intel_cpu/src/nodes/rnn.cpp index 679907c2d9cf28..108123fc3fe620 100644 --- a/src/plugins/intel_cpu/src/nodes/rnn.cpp +++ b/src/plugins/intel_cpu/src/nodes/rnn.cpp @@ -11,22 +11,22 @@ #include "nodes/input.h" #include "nodes/reorder.h" #include "openvino/core/parallel.hpp" -#include "shape_inference/shape_inference_ngraph.hpp" -#include "transformations/utils/utils.hpp" - -#include "ov_ops/augru_cell.hpp" -#include "ov_ops/augru_sequence.hpp" #include "openvino/op/gru_cell.hpp" #include "openvino/op/gru_sequence.hpp" #include "openvino/op/lstm_sequence.hpp" #include "openvino/op/rnn_cell.hpp" #include "openvino/op/rnn_sequence.hpp" +#include "ov_ops/augru_cell.hpp" +#include "ov_ops/augru_sequence.hpp" +#include "shape_inference/shape_inference.hpp" +#include "transformations/utils/utils.hpp" using namespace dnnl; namespace ov { namespace intel_cpu { + namespace node { static rnn_direction ieDirection2dnnl(const std::shared_ptr& op) { @@ -356,19 +356,17 @@ namespace { * dimentions permutation, necessary due to the mismatch between the ngrpah and the oneDNN RNN node descriptions. 
* */ -class RnnShapeInfer : public NgraphShapeInfer { +class RnnShapeInfer : public IShapeInfer { public: - RnnShapeInfer(std::shared_ptr op) : - NgraphShapeInfer(make_shape_inference(op), EMPTY_PORT_MASK) { - is_sequence = !(RNN::isCell(op)); - - native_order = RNN::testNativeOrder(op); - } + RnnShapeInfer(std::shared_ptr op) + : is_sequence(!(RNN::isCell(op))), + native_order(RNN::testNativeOrder(op)), + m_shape_infer(make_shape_inference(std::move(op))) {} Result infer( const std::vector>& input_shapes, const std::unordered_map& data_dependency) override { - auto result = NgraphShapeInfer::infer(input_shapes, data_dependency); + auto result = m_shape_infer->infer(input_shapes, data_dependency); if (ShapeInferStatus::success != result.status) { OPENVINO_THROW("Unexpected: Unexpected shape inference result status"); } @@ -382,10 +380,24 @@ class RnnShapeInfer : public NgraphShapeInfer { return {std::move(originOutputShapes), result.status}; } + const ov::CoordinateDiff& get_pads_begin() override { + return m_shape_infer->get_pads_begin(); + } + + const ov::CoordinateDiff& get_pads_end() override { + return m_shape_infer->get_pads_end(); + } + + port_mask_t get_port_mask() const override { + return m_shape_infer->get_port_mask(); + } + private: - bool is_sequence = false; - bool native_order = true; + bool is_sequence; + bool native_order; + ShapeInferPtr m_shape_infer; }; + class RnnShapeInferFactory final : public ShapeInferFactory { public: RnnShapeInferFactory(std::shared_ptr op) : m_op(op) {} diff --git a/src/plugins/intel_cpu/src/nodes/strided_slice.cpp b/src/plugins/intel_cpu/src/nodes/strided_slice.cpp index 13671c22d102ae..81a5825c99afc0 100644 --- a/src/plugins/intel_cpu/src/nodes/strided_slice.cpp +++ b/src/plugins/intel_cpu/src/nodes/strided_slice.cpp @@ -8,7 +8,6 @@ #include "common/cpu_memcpy.h" #include "input.h" #include "openvino/opsets/opset1.hpp" -#include "shape_inference/shape_inference_ngraph.hpp" #include "slice_shape_inference_utils.hpp" #include "shape_inference/custom/strided_slice.hpp" diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/matmul.cpp b/src/plugins/intel_cpu/src/shape_inference/custom/matmul.cpp index ac1aa0319386bb..9a6fdb8b260b97 100644 --- a/src/plugins/intel_cpu/src/shape_inference/custom/matmul.cpp +++ b/src/plugins/intel_cpu/src/shape_inference/custom/matmul.cpp @@ -5,6 +5,7 @@ #include "matmul.hpp" #include "utils.hpp" #include "openvino/opsets/opset1.hpp" +#include "shape_inference/shape_inference.hpp" namespace ov { namespace intel_cpu { @@ -64,17 +65,17 @@ Result MMShapeInfer::infer( ShapeInferPtr MMShapeInferFactory::makeShapeInfer() const { if (const auto matmul = ov::as_type_ptr(m_op)) { - const auto output_rank = matmul->get_output_partial_shape(0).rank().get_length(); - const bool transpose_a = matmul->get_transpose_a(); - const bool transpose_b = matmul->get_transpose_b(); const auto input_rank0 = matmul->get_input_partial_shape(0).rank().get_length(); const auto input_rank1 = matmul->get_input_partial_shape(1).rank().get_length(); + if (input_rank0 == input_rank1) { + const auto output_rank = matmul->get_output_partial_shape(0).rank().get_length(); + const bool transpose_a = matmul->get_transpose_a(); + const bool transpose_b = matmul->get_transpose_b(); return std::make_shared(output_rank, transpose_a, transpose_b); } else { - return std::make_shared(make_shape_inference(m_op), EMPTY_PORT_MASK); + return make_shape_inference(m_op); } - } else { OPENVINO_THROW("Unexpected operation type in the MatMul shape inference 
factory"); } diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/matmul.hpp b/src/plugins/intel_cpu/src/shape_inference/custom/matmul.hpp index 15f032772250ae..4ba83f6e72a639 100644 --- a/src/plugins/intel_cpu/src/shape_inference/custom/matmul.hpp +++ b/src/plugins/intel_cpu/src/shape_inference/custom/matmul.hpp @@ -3,8 +3,8 @@ // #include + #include "shape_inference/shape_inference_cpu.hpp" -#include "shape_inference/shape_inference_ngraph.hpp" #pragma once namespace ov { @@ -42,4 +42,3 @@ class MMShapeInferFactory : public ShapeInferFactory { } // namespace node } // namespace intel_cpu } // namespace ov - diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/scaled_attn.cpp b/src/plugins/intel_cpu/src/shape_inference/custom/scaled_attn.cpp index c2e8ebd92430bf..dc0e6cb970afc4 100644 --- a/src/plugins/intel_cpu/src/shape_inference/custom/scaled_attn.cpp +++ b/src/plugins/intel_cpu/src/shape_inference/custom/scaled_attn.cpp @@ -4,8 +4,7 @@ #include "scaled_attn.hpp" -#include "shape_inference/shape_inference_cpu.hpp" -#include "shape_inference/shape_inference_ngraph.hpp" +#include "shape_inference/shape_inference.hpp" #include "transformations/cpu_opset/common/op/sdpa.hpp" #include "utils.hpp" @@ -78,7 +77,7 @@ ShapeInferPtr SDPAShapeInferFactory::makeShapeInfer() const { return std::make_shared(config); } // fallback to ngraph shape infer on non-perf-critical case - return std::make_shared(make_shape_inference(m_op), EMPTY_PORT_MASK); + return make_shape_inference(m_op); } } // namespace node diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/strided_slice.cpp b/src/plugins/intel_cpu/src/shape_inference/custom/strided_slice.cpp index bb280a4356074e..4850edae80a9c8 100644 --- a/src/plugins/intel_cpu/src/shape_inference/custom/strided_slice.cpp +++ b/src/plugins/intel_cpu/src/shape_inference/custom/strided_slice.cpp @@ -5,7 +5,7 @@ #include "strided_slice.hpp" #include "utils.hpp" #include "slice_shape_inference.hpp" -#include "shape_inference/shape_inference_ngraph.hpp" +#include "shape_inference/shape_inference.hpp" namespace ov { namespace intel_cpu { @@ -75,13 +75,13 @@ Result StridedSliceShapeInfer::infer( ShapeInferPtr StridedSliceShapeInferFactory::makeShapeInfer() const { if (const auto Slice_op = ov::as_type_ptr(m_op)) { - return std::make_shared(make_shape_inference(m_op), port_mask); + return make_shape_inference(m_op); } else if (const auto SliceScatter_op = ov::as_type_ptr(m_op)) { - return std::make_shared(make_shape_inference(m_op), PortMask(2, 3, 4, 5)); + return make_shape_inference(m_op); } else if (const auto StridedSlice_op = ov::as_type_ptr(m_op)) { const auto& ellipsis_mask = StridedSlice_op->get_ellipsis_mask(); if (std::any_of(ellipsis_mask.begin(), ellipsis_mask.end(), [](int64_t x){ return x == 1; })) { - return std::make_shared(make_shape_inference(m_op), port_mask); + return make_shape_inference(m_op); } else { auto vec_to_set = [](const std::vector& vec){ std::unordered_set to_set; diff --git a/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp b/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp index 2dccce257ae116..4ac8839b749d47 100644 --- a/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp +++ b/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp @@ -71,6 +71,7 @@ #include "matmul_shape_inference.hpp" #include "matrix_nms_shape_inference.hpp" #include "max_pool_shape_inference.hpp" +#include "memory_accessor.hpp" #include "multinomial_shape_inference.hpp" #include 
"nms_shape_inference.hpp" #include "nv12_shape_inference.hpp" @@ -135,12 +136,11 @@ namespace intel_cpu { class ShapeInferBase : public IStaticShapeInfer { public: using iface_type = IStaticShapeInfer; - virtual ~ShapeInferBase() = default; - ShapeInferBase(std::shared_ptr node) : m_input_ranks{}, m_node{node} { + ShapeInferBase(std::shared_ptr node) : m_input_ranks{}, m_node{std::move(node)} { static_assert(std::is_same::value, "Rank type not match to input_ranks type."); - for (size_t i = 0; i < node->get_input_size(); ++i) { - const auto& shape = node->get_input_partial_shape(i); + for (size_t i = 0; i < m_node->get_input_size(); ++i) { + const auto& shape = m_node->get_input_partial_shape(i); const auto& rank_length = shape.rank().is_static() ? shape.rank().get_length() : -1; m_input_ranks.push_back(rank_length); } @@ -152,6 +152,23 @@ class ShapeInferBase : public IStaticShapeInfer { return {std::vector{input_shapes[0]}}; } + IShapeInfer::Result infer(const std::vector>& input_shapes, + const std::unordered_map& data_dependency) override { + const auto& input_ranks = get_input_ranks(); + const auto inputs_count = input_shapes.size(); + OPENVINO_ASSERT(input_ranks.size() <= inputs_count, "Too few input shapes passed to Shape infer."); + std::vector input_static_shapes; + + input_static_shapes.reserve(inputs_count); + for (size_t port = 0; port < input_ranks.size(); ++port) { + input_static_shapes.push_back(input_ranks[port] == 0 ? StaticShapeRef() : input_shapes[port].get()); + } + + // call shape inference API + auto shape_infer_result = infer(input_static_shapes, MemoryAccessor(data_dependency, input_ranks)); + return shape_infer_result ? move_shapes_to_result(*shape_infer_result) : Result{{}, ShapeInferStatus::skip}; + } + const ov::CoordinateDiff& get_pads_begin() override { OPENVINO_ASSERT(false, "ShapeInferBase do not support get_pads_begin() by default."); } @@ -165,12 +182,21 @@ class ShapeInferBase : public IStaticShapeInfer { } port_mask_t get_port_mask() const override { - return 0; + return EMPTY_PORT_MASK; } protected: std::vector m_input_ranks; std::shared_ptr m_node; + +private: + static Result move_shapes_to_result(std::vector& output_shapes) { + Result result{decltype(Result::dims){output_shapes.size()}, ShapeInferStatus::success}; + std::transform(output_shapes.begin(), output_shapes.end(), result.dims.begin(), [](StaticShape& s) { + return std::move(*s); + }); + return result; + } }; /** @@ -241,7 +267,7 @@ class ShapeInferFallback : public ShapeInferBase { } }; -template +template class ShapeInferTA : public ShapeInferBase { public: using ShapeInferBase::ShapeInferBase; @@ -264,7 +290,7 @@ class ShapeInferTA : public ShapeInferBase { * @tparam TOp Type of operator. */ template -class ShapeInferTA : public ShapeInferBase { +class ShapeInferTA : public ShapeInferBase { public: using ShapeInferBase::ShapeInferBase; @@ -297,7 +323,7 @@ class ShapeInferPaddingBase : public ShapeInferBase { * @tparam TOp Type of operator. * @tparam MASK The bit mask where each bit corresponds to an input port number. */ -template +template class ShapeInferPaddingTA : public ShapeInferPaddingBase { public: using ShapeInferPaddingBase::ShapeInferPaddingBase; @@ -319,7 +345,7 @@ class ShapeInferPaddingTA : public ShapeInferPaddingBase { * @tparam MASK The bit mask where each bit corresponds to an input port number. 
*/ template -class ShapeInferPaddingTA : public ShapeInferPaddingBase { +class ShapeInferPaddingTA : public ShapeInferPaddingBase { public: using ShapeInferPaddingBase::ShapeInferPaddingBase; @@ -337,7 +363,7 @@ class ShapeInferPaddingTA : public ShapeInferPaddingBase { * \tparam Args TypesInference object ctor args. */ template -class ShapeInferFactory { +class ShapeInferenceFactory { public: // Helper type to define specific Makers map values. using TValue = std::function; @@ -398,7 +424,7 @@ using namespace ov::opset10; // Helper types for IStaticShapeInfer makers. using IStaticShapeInferFactory = - ShapeInferFactory, std::shared_ptr>; + ShapeInferenceFactory, std::shared_ptr>; // clang-format off // Initialization map for operators supporting IStaticShapeInfer objects. @@ -584,18 +610,50 @@ const IStaticShapeInferFactory::TRegistry IStaticShapeInferFactory::registry{ #undef _OV_OP_SHAPE_INFER_MASK_REG #undef _OV_OP_SHAPE_INFER_VA_REG +class ShapeInferCustomMask : public IShapeInfer { +public: + ShapeInferCustomMask(ShapeInferPtr shape_infer, port_mask_t port_mask) + : m_shape_infer{std::move(shape_infer)}, + m_port_mask{port_mask} {} + + Result infer(const std::vector>& input_shapes, + const std::unordered_map& data_dependency) override { + return m_shape_infer->infer(input_shapes, data_dependency); + } + + const ov::CoordinateDiff& get_pads_begin() override { + return m_shape_infer->get_pads_begin(); + } + + const ov::CoordinateDiff& get_pads_end() override { + return m_shape_infer->get_pads_end(); + } + + port_mask_t get_port_mask() const override { + return m_port_mask; + } + +private: + const ShapeInferPtr m_shape_infer; + const port_mask_t m_port_mask; +}; + std::shared_ptr make_shape_inference(std::shared_ptr op) { if (auto shape_infer = IStaticShapeInferFactory::make(op->get_type_info(), op)) { return shape_infer; } else if (ov::is_type(op)) { - return std::make_shared(op); + return std::make_shared(std::move(op)); } else if (ov::is_type(op) || ov::is_type(op) || ov::is_type(op)) { - return std::make_shared(op); + return std::make_shared(std::move(op)); } else { - return std::make_shared(op); + return std::make_shared(std::move(op)); } } + +ShapeInferPtr make_shape_inference(std::shared_ptr op, IShapeInfer::port_mask_t port_mask) { + return std::make_shared(make_shape_inference(std::move(op)), port_mask); +} } // namespace intel_cpu } // namespace ov diff --git a/src/plugins/intel_cpu/src/shape_inference/shape_inference.hpp b/src/plugins/intel_cpu/src/shape_inference/shape_inference.hpp index a4adf753b687a2..10d65f0675d262 100644 --- a/src/plugins/intel_cpu/src/shape_inference/shape_inference.hpp +++ b/src/plugins/intel_cpu/src/shape_inference/shape_inference.hpp @@ -4,20 +4,19 @@ #pragma once -#include -#include - +#include "openvino/core/core.hpp" +#include "openvino/core/node.hpp" #include "ov_optional.hpp" -#include "shape_inference_status.hpp" -#include "static_shape.hpp" +#include "shape_inference/shape_inference_cpu.hpp" +#include "shape_inference/static_shape.hpp" #include "tensor_data_accessor.hpp" namespace ov { namespace intel_cpu { -class IStaticShapeInfer { +class IStaticShapeInfer : public IShapeInfer { public: - using port_mask_t = uint32_t; //!< Operator's port mask to indicate input data dependency + using IShapeInfer::infer; /** * @brief Do shape inference. 
@@ -29,22 +28,10 @@ class IStaticShapeInfer { virtual ov::optional> infer(const std::vector& input_shapes, const ov::ITensorAccessor& tensor_accessor) = 0; - /** - * @brief Some shape inference implementation may require input data stored inside the input tensors. To define - * which inputs data are required, the port mask is used. Each set bit corresponds to the specific input port - * number. - * - * @return port_mask_t a bit mask where each bit corresponds to an input port number. - */ - virtual port_mask_t get_port_mask() const = 0; - - // infer may generate padding as by-product, these APIs is designed to retrieve them back - virtual const ov::CoordinateDiff& get_pads_begin() = 0; - virtual const ov::CoordinateDiff& get_pads_end() = 0; - virtual const std::vector& get_input_ranks() = 0; }; std::shared_ptr make_shape_inference(std::shared_ptr op); +ShapeInferPtr make_shape_inference(std::shared_ptr op, IShapeInfer::port_mask_t port_mask); } // namespace intel_cpu } // namespace ov diff --git a/src/plugins/intel_cpu/src/shape_inference/shape_inference_cpu.cpp b/src/plugins/intel_cpu/src/shape_inference/shape_inference_cpu.cpp index 3b759c21803092..c94572cfb1868e 100644 --- a/src/plugins/intel_cpu/src/shape_inference/shape_inference_cpu.cpp +++ b/src/plugins/intel_cpu/src/shape_inference/shape_inference_cpu.cpp @@ -2,14 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shape_inference_ngraph.hpp" +#include "shape_inference/shape_inference.hpp" +#include "shape_inference/shape_inference_cpu.hpp" namespace ov { namespace intel_cpu { NgraphShapeInferFactory::NgraphShapeInferFactory(std::shared_ptr op) : m_op(std::move(op)) {} ShapeInferPtr NgraphShapeInferFactory::makeShapeInfer() const { - return std::make_shared(make_shape_inference(m_op)); + return make_shape_inference(m_op); } const ov::CoordinateDiff ShapeInferEmptyPads::m_emptyVec = {}; diff --git a/src/plugins/intel_cpu/src/shape_inference/shape_inference_ngraph.cpp b/src/plugins/intel_cpu/src/shape_inference/shape_inference_ngraph.cpp deleted file mode 100644 index 897e28aad42c78..00000000000000 --- a/src/plugins/intel_cpu/src/shape_inference/shape_inference_ngraph.cpp +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "shape_inference_ngraph.hpp" -#include "memory_accessor.hpp" - -#include - -using namespace ov::intel_cpu; - -NgraphShapeInfer::NgraphShapeInfer(std::shared_ptr shape_infer) - : m_shape_infer(std::move(shape_infer)), - m_port_mask(m_shape_infer->get_port_mask()) {} - -NgraphShapeInfer::NgraphShapeInfer(std::shared_ptr shape_infer, IShapeInfer::port_mask_t port_mask) - : m_shape_infer(std::move(shape_infer)), - m_port_mask(port_mask) {} - -IShapeInfer::Result -NgraphShapeInfer::infer( - const std::vector>& input_shapes, - const std::unordered_map& data_dependency) { - const auto& iranks = m_shape_infer->get_input_ranks(); - OPENVINO_ASSERT(iranks.size() <= input_shapes.size(), "Too few input shapes passed to Shape infer."); - std::vector input_static_shapes; - - input_static_shapes.reserve(input_shapes.size()); - - for (size_t port = 0; port < iranks.size(); port++) { - if (iranks[port] == 0) { - input_static_shapes.emplace_back(); - } else { - input_static_shapes.emplace_back(input_shapes[port].get()); - } - } - - // call shape inference API - auto shape_infer_result = m_shape_infer->infer(input_static_shapes, MemoryAccessor(data_dependency, iranks)); - - Result result{{}, shape_infer_result ? 
ShapeInferStatus::success : ShapeInferStatus::skip}; - - if (shape_infer_result) { - result.dims.reserve(shape_infer_result->size()); - std::transform(shape_infer_result->begin(), - shape_infer_result->end(), - std::back_inserter(result.dims), - [](StaticShape& s) { - return std::move(*s); - }); - } - - return result; -} diff --git a/src/plugins/intel_cpu/src/shape_inference/shape_inference_ngraph.hpp b/src/plugins/intel_cpu/src/shape_inference/shape_inference_ngraph.hpp deleted file mode 100644 index 450d4e94055946..00000000000000 --- a/src/plugins/intel_cpu/src/shape_inference/shape_inference_ngraph.hpp +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "shape_inference_cpu.hpp" -#include "shape_inference.hpp" - -namespace ov { -namespace intel_cpu { - -/** - * @brief This class wraps core specific shape inference class to implement CPU plugin specific interface. - */ -class NgraphShapeInfer : public IShapeInfer { -public: - /** - * @brief Wraps IStaticShapeInfer into IShapeInfer interface. Will use port mask defined in IStaticShapeInfer. - * - * @param shape_infer Instance of IStaticShapeInfer. - */ - NgraphShapeInfer(std::shared_ptr shape_infer); - - /** - * @brief Wraps IStaticShapeInfer into IShapeInfer interface. Will use port mask defined by user - * - * @param shape_infer Instance of IStaticShapeInfer. - * @param port_mask Port mask define. - */ - NgraphShapeInfer(std::shared_ptr shape_infer, IShapeInfer::port_mask_t port_mask); - - Result infer( - const std::vector>& input_shapes, - const std::unordered_map& data_dependency) override; - - // infer may generate padding as by-product, these APIs is designed to retrieve them back - const ov::CoordinateDiff& get_pads_begin() override { - return m_shape_infer->get_pads_begin(); - } - const ov::CoordinateDiff& get_pads_end() override { - return m_shape_infer->get_pads_end(); - } - port_mask_t get_port_mask() const override { - return m_port_mask; - } - -private: - std::shared_ptr m_shape_infer; - IShapeInfer::port_mask_t m_port_mask; -}; - -} // namespace intel_cpu -} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/activation.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/activation.cpp index bd81bcf1a41c63..57f098e1f234d2 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/activation.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/activation.cpp @@ -257,7 +257,7 @@ const std::map>>& activat {SoftSign, {{}}}, {SoftPlus, {{}}}, {IsFinite, {{}}}, - {IsNaN, {{}}}, + {IsNaN, {{}}}, }; return activationTypes; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/reduce_ops.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/reduce_ops.cpp index 9b7ae687e9c81d..bffb0787333185 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/reduce_ops.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/reduce_ops.cpp @@ -32,7 +32,7 @@ const std::vector> input_shapes = { const std::vector> input_shapes_0_dim = { std::vector{2, 0, 4, 1}, std::vector{8, 0, 4, 0}, - std::vector{0, 0, 0, 0}, + std::vector{2, 3, 4, 0}, }; const std::vector> input_shapes_one_axis = { @@ -60,6 +60,11 @@ const std::vector> axes = { {1, -1} }; +const std::vector> axes_0_dim = { + 
{1, 3}, + {0, 1, 3} +}; + std::vector op_types = { ov::test::utils::OpType::SCALAR, ov::test::utils::OpType::VECTOR, @@ -174,7 +179,7 @@ const auto params_reduction_types = testing::Combine( ); const auto params_empty_input = testing::Combine( - testing::ValuesIn(axes), + testing::ValuesIn(axes_0_dim), testing::Values(op_types[1]), testing::ValuesIn(keep_dims), testing::ValuesIn(reduction_types), diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/adaptive_avg_pool_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/adaptive_avg_pool_shape_inference_test.cpp index a82769e90e0ed4..bc34ae398af85d 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/adaptive_avg_pool_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/adaptive_avg_pool_shape_inference_test.cpp @@ -24,7 +24,7 @@ TEST_F(AdaptiveAvgPoolV8StaticShapeInferenceTest, default_ctor) { const std::unordered_map const_data{{1, {element::i32, ov::Shape{2}, spatial_dims}}}; op = make_op(); - input_shapes = ShapeVector{{1, 3, 1, 2}, {2}}; + input_shapes = StaticShapeVector{{1, 3, 1, 2}, {2}}; output_shapes = shape_inference(op.get(), input_shapes, const_data); EXPECT_EQ(output_shapes.size(), 1); @@ -37,7 +37,7 @@ TEST_F(AdaptiveAvgPoolV8StaticShapeInferenceTest, out_spatial_dims_as_constant) op = make_op(data, out_shape); - input_shapes = ShapeVector{{1, 3, 10}, {1}}; + input_shapes = StaticShapeVector{{1, 3, 10}, {1}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -53,7 +53,7 @@ TEST_F(AdaptiveAvgPoolV8StaticShapeInferenceTest, out_spatial_dims_in_const_map) int32_t spatial_dims[] = {9, 8, 7}; const std::unordered_map const_data{{1, {element::i32, ov::Shape{3}, spatial_dims}}}; - input_shapes = ShapeVector{{1, 3, 10, 2, 4}, {3}}; + input_shapes = StaticShapeVector{{1, 3, 10, 2, 4}, {3}}; output_shapes = shape_inference(op.get(), input_shapes, const_data); EXPECT_EQ(output_shapes.size(), 1); @@ -69,7 +69,7 @@ TEST_F(AdaptiveAvgPoolV8StaticShapeInferenceTest, out_spatial_dims_in_const_map_ int32_t spatial_dims[] = {9, 8}; const std::unordered_map const_data{{1, {element::i32, ov::Shape{2}, spatial_dims}}}; - input_shapes = ShapeVector{{1, 3, 10, 2, 4}, {3}}; + input_shapes = StaticShapeVector{{1, 3, 10, 2, 4}, {3}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, const_data), ov::NodeValidationFailure, HasSubstr("Number of spatial dimensions is not compatible with input data rank")); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/adaptive_max_pool_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/adaptive_max_pool_shape_inference_test.cpp index 5ba4415a593b87..8c8b1cff32075e 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/adaptive_max_pool_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/adaptive_max_pool_shape_inference_test.cpp @@ -24,7 +24,7 @@ TEST_F(AdaptiveMaxPoolV8StaticShapeInferenceTest, default_ctor) { const std::unordered_map const_data{{1, {element::i32, ov::Shape{2}, spatial_dims}}}; op = make_op(); - input_shapes = ShapeVector{{1, 3, 1, 2}, {2}}; + input_shapes = StaticShapeVector{{1, 3, 1, 2}, {2}}; output_shapes = shape_inference(op.get(), input_shapes, const_data); EXPECT_EQ(output_shapes.size(), 2); @@ -37,7 +37,7 @@ TEST_F(AdaptiveMaxPoolV8StaticShapeInferenceTest, out_spatial_dims_as_constant) op = make_op(data, out_shape); - input_shapes = ShapeVector{{1, 3, 10}, {1}}; + 
input_shapes = StaticShapeVector{{1, 3, 10}, {1}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 2); @@ -53,7 +53,7 @@ TEST_F(AdaptiveMaxPoolV8StaticShapeInferenceTest, out_spatial_dims_in_const_map) int32_t spatial_dims[] = {9, 8, 7}; const std::unordered_map const_data{{1, {element::i32, ov::Shape{3}, spatial_dims}}}; - input_shapes = ShapeVector{{1, 3, 10, 2, 4}, {3}}; + input_shapes = StaticShapeVector{{1, 3, 10, 2, 4}, {3}}; output_shapes = shape_inference(op.get(), input_shapes, const_data); EXPECT_EQ(output_shapes.size(), 2); @@ -69,7 +69,7 @@ TEST_F(AdaptiveMaxPoolV8StaticShapeInferenceTest, out_spatial_dims_in_const_map_ int32_t spatial_dims[] = {9, 8}; const std::unordered_map const_data{{1, {element::i32, ov::Shape{2}, spatial_dims}}}; - input_shapes = ShapeVector{{1, 3, 10, 2, 4}, {3}}; + input_shapes = StaticShapeVector{{1, 3, 10, 2, 4}, {3}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, const_data), ov::NodeValidationFailure, HasSubstr("Number of spatial dimensions is not compatible with input data rank")); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/avg_pool_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/avg_pool_shape_inference_test.cpp index 25e65d7bc680c6..1993a74edcf9e6 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/avg_pool_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/avg_pool_shape_inference_test.cpp @@ -26,7 +26,7 @@ TYPED_TEST_P(AvgPoolCommonStaticShapeInferenceTest, default_ctor) { this->op->set_rounding_type(op::RoundingType::FLOOR); this->op->set_auto_pad(op::PadType::VALID); - this->input_shapes = ShapeVector{{1, 3, 10, 12}}; + this->input_shapes = StaticShapeVector{{1, 3, 10, 12}}; auto shape_infer = make_shape_inference(this->op); const auto input_shape_refs = make_static_shape_refs(this->input_shapes); this->output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor()); @@ -49,7 +49,7 @@ TYPED_TEST_P(AvgPoolCommonStaticShapeInferenceTest, no_auto_pad_round_floor) { this->op = this->make_op(data, strides, pads_begin, pads_end, kernel_shape, false, rounding_mode, pad_type); - this->input_shapes = ShapeVector{{1, 3, 10, 12}}; + this->input_shapes = StaticShapeVector{{1, 3, 10, 12}}; auto shape_infer = make_shape_inference(this->op); const auto input_shape_refs = make_static_shape_refs(this->input_shapes); this->output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor()); @@ -72,7 +72,7 @@ TYPED_TEST_P(AvgPoolCommonStaticShapeInferenceTest, auto_padding_same_lower_roun this->op = this->make_op(data, strides, pads_begin, pads_end, kernel_shape, false, rounding_mode, pad_type); - this->input_shapes = ShapeVector{{1, 3, 10, 12, 20}}; + this->input_shapes = StaticShapeVector{{1, 3, 10, 12, 20}}; auto shape_infer = make_shape_inference(this->op); const auto input_shape_refs = make_static_shape_refs(this->input_shapes); this->output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor()); @@ -95,7 +95,7 @@ TYPED_TEST_P(AvgPoolCommonStaticShapeInferenceTest, auto_padding_same_upper_roun this->op = this->make_op(data, strides, pads_begin, pads_end, kernel_shape, true, rounding_mode, pad_type); - this->input_shapes = ShapeVector{{1, 3, 10, 12, 20}}; + this->input_shapes = StaticShapeVector{{1, 3, 10, 12, 20}}; auto shape_infer = make_shape_inference(this->op); const auto input_shape_refs = make_static_shape_refs(this->input_shapes); this->output_shapes = 
*shape_infer->infer(input_shape_refs, make_tensor_accessor()); @@ -118,7 +118,7 @@ TYPED_TEST_P(AvgPoolCommonStaticShapeInferenceTest, auto_padding_same_upper_roun this->op = this->make_op(data, strides, pads_begin, pads_end, kernel_shape, true, rounding_mode, pad_type); - this->input_shapes = ShapeVector{{32, 32, 2, 2, 4}}; + this->input_shapes = StaticShapeVector{{32, 32, 2, 2, 4}}; auto shape_infer = make_shape_inference(this->op); const auto input_shape_refs = make_static_shape_refs(this->input_shapes); this->output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor()); @@ -153,7 +153,7 @@ TEST_F(AvgPoolV14StaticShapeInferenceTest, explicit_padding_ceil_torch) { this->op = this->make_op(data, strides, pads_begin, pads_end, kernel_shape, true, rounding_mode, pad_type); - this->input_shapes = ShapeVector{{1, 3, 9, 9}}; + this->input_shapes = StaticShapeVector{{1, 3, 9, 9}}; auto shape_infer = make_shape_inference(this->op); const auto input_shape_refs = make_static_shape_refs(this->input_shapes); this->output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor()); @@ -174,7 +174,7 @@ TEST_F(AvgPoolV14StaticShapeInferenceTest, explicit_padding_ceil_torch_no_stride this->op = this->make_op(data, strides, pads_begin, pads_end, kernel_shape, false, rounding_mode, pad_type); - this->input_shapes = ShapeVector{{1, 3, 9, 9}}; + this->input_shapes = StaticShapeVector{{1, 3, 9, 9}}; auto shape_infer = make_shape_inference(this->op); const auto input_shape_refs = make_static_shape_refs(this->input_shapes); this->output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor()); @@ -195,7 +195,7 @@ TEST_F(AvgPoolV14StaticShapeInferenceTest, auto_padding_ceil_torch) { this->op = this->make_op(data, strides, pads_begin, pads_end, kernel_shape, false, rounding_mode, pad_type); - this->input_shapes = ShapeVector{{1, 3, 9, 9}}; + this->input_shapes = StaticShapeVector{{1, 3, 9, 9}}; auto shape_infer = make_shape_inference(this->op); const auto input_shape_refs = make_static_shape_refs(this->input_shapes); this->output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor()); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/batch_to_space_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/batch_to_space_shape_inference_test.cpp index 0d4b75a16060cd..67f6c9df3e66aa 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/batch_to_space_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/batch_to_space_shape_inference_test.cpp @@ -35,9 +35,10 @@ TEST_F(BatchToSpaceV1StaticShapeInferenceTest, default_ctor) { int32_t crops_begin_val[] = {0, 2, 0, 0, 0}; int32_t crops_end_val[] = {0, 2, 1, 0, 0}; - const auto constant_data = std::unordered_map{{1, {element::i32, Shape{5}, block_val}}, - {2, {element::i32, Shape{5}, crops_begin_val}}, - {3, {element::i32, Shape{5}, crops_end_val}}}; + const auto constant_data = + std::unordered_map{{1, {element::i32, ov::Shape{5}, block_val}}, + {2, {element::i32, ov::Shape{5}, crops_begin_val}}, + {3, {element::i32, ov::Shape{5}, crops_end_val}}}; input_shapes = {{960, 6, 13, 128, 16}, {5}, {5}, {5}}; output_shapes = shape_inference(op.get(), input_shapes, constant_data); @@ -52,9 +53,10 @@ TEST_F(BatchToSpaceV1StaticShapeInferenceTest, blocks_crops_in_constant_map) { int32_t crops_begin_val[] = {0, 2, 0, 0, 0}; int32_t crops_end_val[] = {0, 2, 1, 0, 0}; - const auto constant_data = std::unordered_map{{1, {element::i32, 
Shape{5}, block_val}}, - {2, {element::i32, Shape{5}, crops_begin_val}}, - {3, {element::i32, Shape{5}, crops_end_val}}}; + const auto constant_data = + std::unordered_map{{1, {element::i32, ov::Shape{5}, block_val}}, + {2, {element::i32, ov::Shape{5}, crops_begin_val}}, + {3, {element::i32, ov::Shape{5}, crops_end_val}}}; input_shapes = {{960, 6, 13, 128, 16}, {5}, {5}, {5}}; @@ -64,9 +66,9 @@ TEST_F(BatchToSpaceV1StaticShapeInferenceTest, blocks_crops_in_constant_map) { TEST_F(BatchToSpaceV1StaticShapeInferenceTest, blocs_crops_as_constants) { auto data = std::make_shared(element::f32, PartialShape{-1, -1, -1, -1}); - auto block_shape = std::make_shared(element::i64, Shape{4}, std::vector{1, 10, 5, 1}); - auto crops_begin = std::make_shared(element::i64, Shape{4}, std::vector{0, 3, 1, 0}); - auto crops_end = std::make_shared(element::i64, Shape{4}, std::vector{0, 3, 0, 0}); + auto block_shape = std::make_shared(element::i64, ov::Shape{4}, std::vector{1, 10, 5, 1}); + auto crops_begin = std::make_shared(element::i64, ov::Shape{4}, std::vector{0, 3, 1, 0}); + auto crops_end = std::make_shared(element::i64, ov::Shape{4}, std::vector{0, 3, 0, 0}); op = make_op(data, block_shape, crops_begin, crops_end); input_shapes = {{100, 7, 13, 3}, {4}, {4}, {4}}; @@ -81,8 +83,8 @@ TEST_F(BatchToSpaceV1StaticShapeInferenceTest, missing_tensor_data) { int32_t block_val[] = {1, 6, 5, 1, 16}; int32_t crops_end_val[] = {0, 2, 1, 0, 0}; - const auto constant_data = std::unordered_map{{1, {element::i32, Shape{5}, block_val}}, - {3, {element::i32, Shape{5}, crops_end_val}}}; + const auto constant_data = std::unordered_map{{1, {element::i32, ov::Shape{5}, block_val}}, + {3, {element::i32, ov::Shape{5}, crops_end_val}}}; input_shapes = {{960, 6, 13, 128, 16}, {5}, {5}, {5}}; diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/bec_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/bec_shape_inference_test.cpp index 496c06bb2ebf1d..0d2540f9bcc6ad 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/bec_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/bec_shape_inference_test.cpp @@ -16,7 +16,7 @@ template class BECStaticShapeInferenceTest : public OpStaticShapeInferenceTest { protected: void SetUp() override { - this->output_shapes = ShapeVector(1); + this->output_shapes = StaticShapeVector(1); } }; diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/bel_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/bel_shape_inference_test.cpp index 2f0760b90efebc..212b5555d34005 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/bel_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/bel_shape_inference_test.cpp @@ -16,7 +16,7 @@ template class BELStaticShapeInferenceTest : public OpStaticShapeInferenceTest { protected: void SetUp() override { - this->output_shapes = ShapeVector(1); + this->output_shapes = StaticShapeVector(1); } element::Type dtype{element::boolean}; diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/binary_convolution_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/binary_convolution_shape_inference_test.cpp index 1a27bcca224d44..d411137c0148a8 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/binary_convolution_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/binary_convolution_shape_inference_test.cpp @@ -30,7 +30,7 @@ 
TEST_F(BinaryConvolutionV1StaticShapeInferenceTest, default_ctor) { op->set_pads_end({2, 1}); op->set_auto_pad(op::PadType::VALID); - input_shapes = ShapeVector{{1, 3, 10, 12}, {2, 3, 5, 5}}; + input_shapes = StaticShapeVector{{1, 3, 10, 12}, {2, 3, 5, 5}}; auto shape_infer = make_shape_inference(op); const auto input_shape_refs = make_static_shape_refs(input_shapes); output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor()); @@ -50,7 +50,7 @@ TEST_F(BinaryConvolutionV1StaticShapeInferenceTest, default_ctor_three_input_sha op->set_auto_pad(op::PadType::VALID); // Third input shape (bias) can be provided, but is not used - input_shapes = ShapeVector{{1, 3, 10, 12}, {2, 3, 5, 5}, {2}}; + input_shapes = StaticShapeVector{{1, 3, 10, 12}, {2, 3, 5, 5}, {2}}; auto shape_infer = make_shape_inference(op); const auto input_shape_refs = make_static_shape_refs(input_shapes); output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor()); @@ -73,7 +73,7 @@ TEST_F(BinaryConvolutionV1StaticShapeInferenceTest, auto_pads_same_lower_inputs_ op = make_op(data, filters, strides, pads_begin, pads_end, dilations, mode, pad_value, auto_pad); - input_shapes = ShapeVector{{3, 6, 5, 5}, {7, 6, 3, 3}}; + input_shapes = StaticShapeVector{{3, 6, 5, 5}, {7, 6, 3, 3}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -92,7 +92,7 @@ TEST_F(BinaryConvolutionV1StaticShapeInferenceTest, auto_pad_same_lower_inputs_s op = make_op(data, filters, strides, pads_begin, pads_end, dilations, mode, pad_value, auto_pad); - input_shapes = ShapeVector{{3, 6, 5, 5}, {7, 6, 3, 3}}; + input_shapes = StaticShapeVector{{3, 6, 5, 5}, {7, 6, 3, 3}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -111,7 +111,7 @@ TEST_F(BinaryConvolutionV1StaticShapeInferenceTest, data_and_filters_num_channel op = make_op(data, filters, strides, pads_begin, pads_end, dilations, mode, pad_value, auto_pad); - input_shapes = ShapeVector{{3, 5, 5, 5}, {7, 6, 3, 3}}; + input_shapes = StaticShapeVector{{3, 5, 5, 5}, {7, 6, 3, 3}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes), NodeValidationFailure, @@ -130,7 +130,7 @@ TEST_F(BinaryConvolutionV1StaticShapeInferenceTest, data_rank_not_4) { op = make_op(data, filters, strides, pads_begin, pads_end, dilations, mode, pad_value, auto_pad); - input_shapes = ShapeVector{{3, 6, 5}, {7, 6, 3}}; + input_shapes = StaticShapeVector{{3, 6, 5}, {7, 6, 3}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes), NodeValidationFailure, diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/bucketize_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/bucketize_shape_inference_test.cpp index bb1ddfdf03b02f..508430ba88471d 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/bucketize_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/bucketize_shape_inference_test.cpp @@ -21,7 +21,7 @@ TEST_F(BucketizeV3StaticShapeInferenceTest, default_ctor) { op->set_output_type(element::i32); op->set_with_right_bound(false); - input_shapes = ShapeVector{{3, 2, 7, 89}, {3}}; + input_shapes = StaticShapeVector{{3, 2, 7, 89}, {3}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -33,7 +33,7 @@ TEST_F(BucketizeV3StaticShapeInferenceTest, dynamic_rank_inputs) { const auto buckets = std::make_shared(element::f32, PartialShape::dynamic()); op = make_op(data, buckets, 
element::i32); - input_shapes = ShapeVector{{10, 12, 1}, {5}}; + input_shapes = StaticShapeVector{{10, 12, 1}, {5}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -45,7 +45,7 @@ TEST_F(BucketizeV3StaticShapeInferenceTest, static_rank_inputs) { const auto buckets = std::make_shared(element::f32, PartialShape{-1}); op = make_op(data, buckets); - input_shapes = ShapeVector{{100, 11}, {1}}; + input_shapes = StaticShapeVector{{100, 11}, {1}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -57,7 +57,7 @@ TEST_F(BucketizeV3StaticShapeInferenceTest, bucket_incorrect_rank) { const auto buckets = std::make_shared(element::f32, PartialShape{-1}); op = make_op(data, buckets, element::i32); - input_shapes = ShapeVector{{100, 11}, {2, 1}}; + input_shapes = StaticShapeVector{{100, 11}, {2, 1}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes), NodeValidationFailure, HasSubstr("Buckets input must be a 1D tensor")); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/col2im_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/col2im_shape_inference_test.cpp index 8e0fc06dde142c..e1bb0248b1e735 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/col2im_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/col2im_shape_inference_test.cpp @@ -30,16 +30,16 @@ TEST_F(Col2ImStaticShapeInferenceTest, kernel_size_and_output_size_from_tensor_a const auto kernel_size = std::make_shared(element::i64, ov::PartialShape::dynamic()); const auto strides = Strides{2, 2}; const auto dilations = Strides{2, 2}; - const auto pads_begin = Shape{2, 2}; - const auto pads_end = Shape{2, 2}; + const auto pads_begin = ov::Shape{2, 2}; + const auto pads_end = ov::Shape{2, 2}; const auto op = make_op(data, output_size, kernel_size, strides, dilations, pads_begin, pads_end); int64_t output_size_val[] = {32, 32}; int64_t kernel_size_val[] = {2, 2}; - auto const_inputs = std::unordered_map{{1, {element::i64, Shape{2}, output_size_val}}, - {2, {element::i64, Shape{2}, kernel_size_val}}}; + auto const_inputs = std::unordered_map{{1, {element::i64, ov::Shape{2}, output_size_val}}, + {2, {element::i64, ov::Shape{2}, kernel_size_val}}}; - const auto input_shapes = ShapeVector{Shape{3, 12, 289}, Shape{2}, Shape{2}}; + const auto input_shapes = StaticShapeVector{{3, 12, 289}, {2}, {2}}; auto shape_infer = make_shape_inference(op); const auto input_shape_refs = make_static_shape_refs(input_shapes); const auto output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor(const_inputs)); @@ -59,10 +59,10 @@ TEST_P(Col2ImStaticTestSuite, Col2ImStaticShapeInference) { const auto& expected_output_shape = std::get<7>(param); const auto data = std::make_shared(element::i64, data_shape); - const auto output_size = std::make_shared(element::i64, Shape{2}, output_size_val); - const auto kernel_size = std::make_shared(element::i64, Shape{2}, kernel_size_val); + const auto output_size = std::make_shared(element::i64, ov::Shape{2}, output_size_val); + const auto kernel_size = std::make_shared(element::i64, ov::Shape{2}, kernel_size_val); const auto op = std::make_shared(data, output_size, kernel_size, strides, dilations, pads_begin, pads_end); - const auto input_shapes = ShapeVector{data_shape, Shape{2}, Shape{2}}; + const auto input_shapes = StaticShapeVector{data_shape, {2}, {2}}; auto shape_infer = make_shape_inference(op); const auto input_shape_refs 
= make_static_shape_refs(input_shapes); const auto output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor()); @@ -71,63 +71,53 @@ TEST_P(Col2ImStaticTestSuite, Col2ImStaticShapeInference) { EXPECT_EQ(output_shapes.front(), StaticShape(expected_output_shape)); } -INSTANTIATE_TEST_SUITE_P( - Col2ImStaticShapeInferenceTests, - Col2ImStaticTestSuite, - ::testing::Values( - std::make_tuple( - Shape{3, 12, 81}, // data shape - std::vector{16, 16}, // output_size values - std::vector{2, 2}, // kernel_size values - Strides{2, 2}, // strides - Strides{2, 2}, // dilations - Shape{2, 2}, // pads_begin - Shape{2, 2}, // pads_end - Shape{3, 3, 16, 16}), // expected output shape - std::make_tuple( - Shape{12, 81}, // data shape - std::vector{16, 16}, // output_size values - std::vector{2, 2}, // kernel_size values - Strides{2, 2}, // strides - Strides{2, 2}, // dilations - Shape{2, 2}, // pads_begin - Shape{2, 2}, // pads_end - Shape{3, 16, 16}), // expected output shape - std::make_tuple( - Shape{3, 12, 225}, // data shape - std::vector{16, 16}, // output_size values - std::vector{2, 2}, // kernel_size values - Strides{1, 1}, // strides - Strides{1, 1}, // dilations - Shape{0, 0}, // pads_begin - Shape{0, 0}, // pads_end - Shape{3, 3, 16, 16}), // expected output shape - std::make_tuple( - Shape{1, 27, 49}, // data shape - std::vector{16, 16}, // output_size values - std::vector{3, 3}, // kernel_size values - Strides{2, 2}, // strides - Strides{2, 2}, // dilations - Shape{1, 1}, // pads_begin - Shape{1, 1}, // pads_end - Shape{1, 3, 16, 16}), // expected output shape - std::make_tuple( - Shape{1, 18, 104}, // data shape - std::vector{16, 16}, // output_size values - std::vector{2, 3}, // kernel_size values - Strides{2, 1}, // strides - Strides{2, 2}, // dilations - Shape{1, 0}, // pads_begin - Shape{0, 1}, // pads_end - Shape{1, 3, 16, 16}), // expected output shape - std::make_tuple( - Shape{12, 12, 324}, // data shape - std::vector{32, 32}, // output_size values - std::vector{2, 2}, // kernel_size values - Strides{2, 2}, // strides - Strides{2, 2}, // dilations - Shape{3, 3}, // pads_begin - Shape{3, 3}, // pads_end - Shape{12, 3, 32, 32}) // expected output shape - ) -); +INSTANTIATE_TEST_SUITE_P(Col2ImStaticShapeInferenceTests, + Col2ImStaticTestSuite, + ::testing::Values(std::make_tuple(ov::Shape{3, 12, 81}, // data shape + std::vector{16, 16}, // output_size values + std::vector{2, 2}, // kernel_size values + Strides{2, 2}, // strides + Strides{2, 2}, // dilations + ov::Shape{2, 2}, // pads_begin + ov::Shape{2, 2}, // pads_end + ov::Shape{3, 3, 16, 16}), // expected output shape + std::make_tuple(ov::Shape{12, 81}, // data shape + std::vector{16, 16}, // output_size values + std::vector{2, 2}, // kernel_size values + Strides{2, 2}, // strides + Strides{2, 2}, // dilations + ov::Shape{2, 2}, // pads_begin + ov::Shape{2, 2}, // pads_end + ov::Shape{3, 16, 16}), // expected output shape + std::make_tuple(ov::Shape{3, 12, 225}, // data shape + std::vector{16, 16}, // output_size values + std::vector{2, 2}, // kernel_size values + Strides{1, 1}, // strides + Strides{1, 1}, // dilations + ov::Shape{0, 0}, // pads_begin + ov::Shape{0, 0}, // pads_end + ov::Shape{3, 3, 16, 16}), // expected output shape + std::make_tuple(ov::Shape{1, 27, 49}, // data shape + std::vector{16, 16}, // output_size values + std::vector{3, 3}, // kernel_size values + Strides{2, 2}, // strides + Strides{2, 2}, // dilations + ov::Shape{1, 1}, // pads_begin + ov::Shape{1, 1}, // pads_end + ov::Shape{1, 3, 16, 
16}), // expected output shape + std::make_tuple(ov::Shape{1, 18, 104}, // data shape + std::vector{16, 16}, // output_size values + std::vector{2, 3}, // kernel_size values + Strides{2, 1}, // strides + Strides{2, 2}, // dilations + ov::Shape{1, 0}, // pads_begin + ov::Shape{0, 1}, // pads_end + ov::Shape{1, 3, 16, 16}), // expected output shape + std::make_tuple(ov::Shape{12, 12, 324}, // data shape + std::vector{32, 32}, // output_size values + std::vector{2, 2}, // kernel_size values + Strides{2, 2}, // strides + Strides{2, 2}, // dilations + ov::Shape{3, 3}, // pads_begin + ov::Shape{3, 3}, // pads_end + ov::Shape{12, 3, 32, 32}))); // expected output shape diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/concat_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/concat_shape_inference_test.cpp index 2770c72e3c5d34..e83a8bd7489e4a 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/concat_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/concat_shape_inference_test.cpp @@ -41,28 +41,28 @@ class ConcatStaticShapeInferenceTest : public OpStaticShapeInferenceTest(element::f32, PartialShape::dynamic()); auto op = std::make_shared(NodeVector{1, param}, -1); - auto output_shapes = shape_inference(op.get(), ShapeVector{{4, 2, 1}}); + auto output_shapes = shape_inference(op.get(), StaticShapeVector{{4, 2, 1}}); ASSERT_EQ(output_shapes.front(), StaticShape({4, 2, 1})); - output_shapes = shape_inference(op.get(), ShapeVector{{1, 2, 0, 4, 5}}); + output_shapes = shape_inference(op.get(), StaticShapeVector{{1, 2, 0, 4, 5}}); ASSERT_EQ(output_shapes.front(), StaticShape({1, 2, 0, 4, 5})); } @@ -87,10 +87,10 @@ TEST(ConcatStaticShapeInferenceTest, consecutively_two_inputs) { auto param = std::make_shared(element::f32, PartialShape::dynamic()); auto op = std::make_shared(NodeVector{2, param}, -3); - auto output_shapes = shape_inference(op.get(), ShapeVector{{4, 2, 1}, {4, 2, 1}}); + auto output_shapes = shape_inference(op.get(), StaticShapeVector{{4, 2, 1}, {4, 2, 1}}); ASSERT_EQ(output_shapes.front(), StaticShape({8, 2, 1})); - output_shapes = shape_inference(op.get(), ShapeVector{{1, 2, 0, 4, 5}, {1, 2, 9, 4, 5}}); + output_shapes = shape_inference(op.get(), StaticShapeVector{{1, 2, 0, 4, 5}, {1, 2, 9, 4, 5}}); ASSERT_EQ(output_shapes.front(), StaticShape({1, 2, 9, 4, 5})); } @@ -98,13 +98,13 @@ TEST(ConcatStaticShapeInferenceTest, consecutively_two_inputs_with_wrong_rank_in auto param = std::make_shared(element::f32, PartialShape::dynamic()); auto op = std::make_shared(NodeVector{2, param}, -3); - auto output_shapes = shape_inference(op.get(), ShapeVector{{4, 2, 1}, {4, 2, 1}}); + auto output_shapes = shape_inference(op.get(), StaticShapeVector{{4, 2, 1}, {4, 2, 1}}); ASSERT_EQ(output_shapes.front(), StaticShape({8, 2, 1})); - auto wrong_rank_input_shapes = ShapeVector{{4}, {0}}; + auto wrong_rank_input_shapes = StaticShapeVector{{4}, {0}}; EXPECT_THROW(shape_inference(op.get(), wrong_rank_input_shapes), ov::AssertFailure); - output_shapes = shape_inference(op.get(), ShapeVector{{1, 2, 0, 4, 5}, {1, 2, 9, 4, 5}}); + output_shapes = shape_inference(op.get(), StaticShapeVector{{1, 2, 0, 4, 5}, {1, 2, 9, 4, 5}}); ASSERT_EQ(output_shapes.front(), StaticShape({1, 2, 9, 4, 5})); } @@ -112,15 +112,15 @@ TEST(ConcatStaticShapeInferenceTest, consecutively_three_inputs) { auto param = std::make_shared(element::f32, PartialShape::dynamic()); auto op = std::make_shared(NodeVector{3, param}, -1); - auto 
output_shapes = shape_inference(op.get(), ShapeVector{{4}, {0}, {2}}); + auto output_shapes = shape_inference(op.get(), StaticShapeVector{{4}, {0}, {2}}); ASSERT_EQ(output_shapes.front(), StaticShape({6})); - output_shapes = shape_inference(op.get(), ShapeVector{{2, 1}, {2, 1}, {2, 1}}); + output_shapes = shape_inference(op.get(), StaticShapeVector{{2, 1}, {2, 1}, {2, 1}}); ASSERT_EQ(output_shapes.front(), StaticShape({2, 3})); - output_shapes = shape_inference(op.get(), ShapeVector{{4, 2, 5}, {4, 2, 1}, {4, 2, 2}}); + output_shapes = shape_inference(op.get(), StaticShapeVector{{4, 2, 5}, {4, 2, 1}, {4, 2, 2}}); ASSERT_EQ(output_shapes.front(), StaticShape({4, 2, 8})); - output_shapes = shape_inference(op.get(), ShapeVector{{1, 2, 3, 4, 3}, {1, 2, 3, 4, 1}, {1, 2, 3, 4, 1}}); + output_shapes = shape_inference(op.get(), StaticShapeVector{{1, 2, 3, 4, 3}, {1, 2, 3, 4, 1}, {1, 2, 3, 4, 1}}); ASSERT_EQ(output_shapes.front(), StaticShape({1, 2, 3, 4, 5})); } diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/convolution_backprop_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/convolution_backprop_shape_inference_test.cpp index 9b506d1d392718..f329168b3702c7 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/convolution_backprop_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/convolution_backprop_shape_inference_test.cpp @@ -33,7 +33,7 @@ TEST_F(ConvolutionBackpropDataV1StaticShapeInferenceTest, default_ctor_direct_in auto pads_begin = CoordinateDiff{1, 1}; auto pads_end = CoordinateDiff{1, 1}; - input_shapes = ShapeVector{{1, 20, 224, 224}, {20, 10, 3, 3}, {spatial_shape.size()}}; + input_shapes = StaticShapeVector{{1, 20, 224, 224}, {20, 10, 3, 3}, {spatial_shape.size()}}; output_shapes = ov::op::v1::shape_infer(op.get(), input_shapes, pads_begin, pads_end); @@ -55,7 +55,7 @@ TEST_F(ConvolutionBackpropDataV1StaticShapeInferenceTest, default_ctor_with_outp op->set_auto_pad(op::PadType::EXPLICIT); op->set_output_shape(spatial_shape.to_shape()); - input_shapes = ShapeVector{{1, 20, 224, 224}, {20, 10, 3, 3}, {spatial_shape.size()}}; + input_shapes = StaticShapeVector{{1, 20, 224, 224}, {20, 10, 3, 3}, {spatial_shape.size()}}; auto shape_infer = make_shape_inference(op); const auto input_shape_refs = make_static_shape_refs(input_shapes); output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor()); @@ -75,7 +75,7 @@ TEST_F(ConvolutionBackpropDataV1StaticShapeInferenceTest, default_ctor) { op->set_output_padding({1, 1}); op->set_auto_pad(op::PadType::VALID); - input_shapes = ShapeVector{{1, 3, 10, 12}, {3, 3, 5, 5}}; + input_shapes = StaticShapeVector{{1, 3, 10, 12}, {3, 3, 5, 5}}; auto shape_infer = make_shape_inference(op); const auto input_shape_refs = make_static_shape_refs(input_shapes); output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor()); @@ -98,7 +98,7 @@ TEST_F(ConvolutionBackpropDataV1StaticShapeInferenceTest, default_ctor_more_inpu op->set_auto_pad(op::PadType::EXPLICIT); op->set_output_shape(spatial_shape.to_shape()); - input_shapes = ShapeVector{{1, 20, 224, 224}, {20, 10, 3, 3}, {spatial_shape.size()}, {0}}; + input_shapes = StaticShapeVector{{1, 20, 224, 224}, {20, 10, 3, 3}, {spatial_shape.size()}, {0}}; auto shape_infer = make_shape_inference(op); const auto input_shape_refs = make_static_shape_refs(input_shapes); output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor()); @@ -121,7 +121,7 @@ 
TEST_F(ConvolutionBackpropDataV1StaticShapeInferenceTest, 2d_inputs_dynamic_rank op = make_op(data, filters, strides, pads_begin, pads_end, dilations, auto_pad); - input_shapes = ShapeVector{{3, 6, 5, 5}, {6, 1, 3, 3}}; + input_shapes = StaticShapeVector{{3, 6, 5, 5}, {6, 1, 3, 3}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -137,11 +137,11 @@ TEST_F(ConvolutionBackpropDataV1StaticShapeInferenceTest, 3d_auto_pad_same_lower const auto data = std::make_shared(element::f32, PartialShape::dynamic(5)); const auto filters = std::make_shared(element::f32, PartialShape::dynamic(5)); - const auto out_spatial = op::v0::Constant::create(element::i64, Shape{3}, {2, 1, 3}); + const auto out_spatial = op::v0::Constant::create(element::i64, ov::Shape{3}, {2, 1, 3}); op = make_op(data, filters, out_spatial, strides, pads_begin, pads_end, dilations, auto_pad); - input_shapes = ShapeVector{{3, 6, 5, 5, 5}, {6, 2, 3, 3, 3}, {3}}; + input_shapes = StaticShapeVector{{3, 6, 5, 5, 5}, {6, 2, 3, 3, 3}, {3}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -161,9 +161,9 @@ TEST_F(ConvolutionBackpropDataV1StaticShapeInferenceTest, 3d_auto_pad_same_upper op = make_op(data, filters, out_spatial, strides, pads_begin, pads_end, dilations, auto_pad); int32_t spatial_dims[] = {2, 6, 1}; - const auto const_map = std::unordered_map{{2, {element::i32, Shape{3}, spatial_dims}}}; + const auto const_map = std::unordered_map{{2, {element::i32, ov::Shape{3}, spatial_dims}}}; - input_shapes = ShapeVector{{3, 5, 5, 5, 5}, {5, 7, 3, 3, 3}, {3}}; + input_shapes = StaticShapeVector{{3, 5, 5, 5, 5}, {5, 7, 3, 3, 3}, {3}}; output_shapes = shape_inference(op.get(), input_shapes, const_map); EXPECT_EQ(output_shapes.size(), 1); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/convolution_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/convolution_shape_inference_test.cpp index 778723e5357fa0..5ed6d552d3c013 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/convolution_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/convolution_shape_inference_test.cpp @@ -27,7 +27,7 @@ TEST_F(ConvolutionV1StaticShapeInferenceTest, default_ctor) { op->set_pads_end({2, 1}); op->set_auto_pad(op::PadType::VALID); - input_shapes = ShapeVector{{1, 3, 10, 12}, {2, 3, 5, 5}}; + input_shapes = StaticShapeVector{{1, 3, 10, 12}, {2, 3, 5, 5}}; auto shape_infer = make_shape_inference(op); const auto input_shape_refs = make_static_shape_refs(input_shapes); output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor()); @@ -47,7 +47,7 @@ TEST_F(ConvolutionV1StaticShapeInferenceTest, default_ctor_three_input_shapes) { op->set_auto_pad(op::PadType::VALID); // Third input shape (bias) can be provided, but is not used - input_shapes = ShapeVector{{1, 3, 10, 12}, {2, 3, 5, 5}, {2}}; + input_shapes = StaticShapeVector{{1, 3, 10, 12}, {2, 3, 5, 5}, {2}}; auto shape_infer = make_shape_inference(op); const auto input_shape_refs = make_static_shape_refs(input_shapes); output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor()); @@ -70,7 +70,7 @@ TEST_F(ConvolutionV1StaticShapeInferenceTest, 2d_auto_pads_same_lower_inputs_dyn op = make_op(data, filters, strides, pads_begin, pads_end, dilations, auto_pad); - input_shapes = ShapeVector{{3, 6, 5, 5}, {7, 6, 3, 3}}; + input_shapes = StaticShapeVector{{3, 6, 5, 5}, {7, 6, 3, 3}}; output_shapes = 
shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -89,7 +89,7 @@ TEST_F(ConvolutionV1StaticShapeInferenceTest, 3d_auto_pad_same_lower_inputs_stat op = make_op(data, filters, strides, pads_begin, pads_end, dilations, auto_pad); - input_shapes = ShapeVector{{3, 6, 5, 5, 5}, {7, 6, 3, 3, 3}}; + input_shapes = StaticShapeVector{{3, 6, 5, 5, 5}, {7, 6, 3, 3, 3}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -108,7 +108,7 @@ TEST_F(ConvolutionV1StaticShapeInferenceTest, data_and_filters_num_channels_not_ op = make_op(data, filters, strides, pads_begin, pads_end, dilations, auto_pad); - input_shapes = ShapeVector{{3, 5, 5, 5, 5}, {7, 6, 3, 3, 3}}; + input_shapes = StaticShapeVector{{3, 5, 5, 5, 5}, {7, 6, 3, 3, 3}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes), NodeValidationFailure, @@ -127,7 +127,7 @@ TEST_F(ConvolutionV1StaticShapeInferenceTest, data_rank_not_compatible_with_filt op = make_op(data, filters, strides, pads_begin, pads_end, dilations, auto_pad); - input_shapes = ShapeVector{{3, 6, 5, 5, 5}, {7, 6, 3, 3}}; + input_shapes = StaticShapeVector{{3, 6, 5, 5, 5}, {7, 6, 3, 3}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes), NodeValidationFailure, diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/ctc_loss_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/ctc_loss_shape_inference_test.cpp index 0c9fcaf6782052..e6d403d0189ec8 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/ctc_loss_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/ctc_loss_shape_inference_test.cpp @@ -28,7 +28,7 @@ TEST_F(CTCLossV4StaticShapeInferenceTest, correct_input_shapes) { auto op = make_op(logits, logit_length, labels, label_length, blank_index); - input_shapes = ShapeVector{{10, 120, 28}, {10}, {10, 120}, {10}, {}}; + input_shapes = StaticShapeVector{{10, 120, 28}, {10}, {10, 120}, {10}, {}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -38,7 +38,7 @@ TEST_F(CTCLossV4StaticShapeInferenceTest, correct_input_shapes) { TEST_F(CTCLossV4StaticShapeInferenceTest, default_ctor) { auto op = make_op(); - input_shapes = ShapeVector{{12, 120, 28}, {12}, {12, 120}, {12}, {}}; + input_shapes = StaticShapeVector{{12, 120, 28}, {12}, {12, 120}, {12}, {}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/deformable_convolution_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/deformable_convolution_shape_inference_test.cpp index 9eabfeb809bb53..b3b9a05e28e0d0 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/deformable_convolution_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/deformable_convolution_shape_inference_test.cpp @@ -30,7 +30,7 @@ TEST_F(DeformableConvolutionV8StaticShapeInferenceTest, default_ctor) { op->set_group(4); op->set_deformable_group(2); - input_shapes = ShapeVector{{1, 4, 5, 5}, {1, 36, 3, 1}, {4, 1, 3, 3}, {1, 18, 3, 1}}; + input_shapes = StaticShapeVector{{1, 4, 5, 5}, {1, 36, 3, 1}, {4, 1, 3, 3}, {1, 18, 3, 1}}; auto shape_infer = make_shape_inference(op); const auto input_shape_refs = make_static_shape_refs(input_shapes); output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor()); @@ -54,7 +54,7 @@ 
TEST_F(DeformableConvolutionV8StaticShapeInferenceTest, pads_same_lower_inputs_d op = make_op(data, offsets, filters, strides, pads_begin, pads_end, dilations, auto_pad, 4, 2); - input_shapes = ShapeVector{{1, 4, 5, 5}, {1, 36, 5, 5}, {4, 1, 3, 3}}; + input_shapes = StaticShapeVector{{1, 4, 5, 5}, {1, 36, 5, 5}, {4, 1, 3, 3}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -75,7 +75,7 @@ TEST_F(DeformableConvolutionV8StaticShapeInferenceTest, pads_same_lower_inputs_d op = make_op(data, offsets, filters, masks, strides, pads_begin, pads_end, dilations, auto_pad, 4, 2); - input_shapes = ShapeVector{{1, 4, 5, 5}, {1, 36, 5, 5}, {4, 1, 3, 3}, {1, 18, 5, 5}}; + input_shapes = StaticShapeVector{{1, 4, 5, 5}, {1, 36, 5, 5}, {4, 1, 3, 3}, {1, 18, 5, 5}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -95,7 +95,7 @@ TEST_F(DeformableConvolutionV8StaticShapeInferenceTest, pads_same_uper_inputs_st op = make_op(data, offsets, filters, strides, pads_begin, pads_end, dilations, auto_pad, 4, 2); - input_shapes = ShapeVector{{1, 4, 5, 5}, {1, 36, 5, 5}, {4, 1, 3, 3}}; + input_shapes = StaticShapeVector{{1, 4, 5, 5}, {1, 36, 5, 5}, {4, 1, 3, 3}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -116,7 +116,7 @@ TEST_F(DeformableConvolutionV8StaticShapeInferenceTest, pads_same_upper_inputs_s op = make_op(data, offsets, filters, masks, strides, pads_begin, pads_end, dilations, auto_pad, 4, 2); - input_shapes = ShapeVector{{1, 4, 5, 5}, {1, 36, 5, 5}, {4, 1, 3, 3}, {1, 18, 5, 5}}; + input_shapes = StaticShapeVector{{1, 4, 5, 5}, {1, 36, 5, 5}, {4, 1, 3, 3}, {1, 18, 5, 5}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -137,7 +137,7 @@ TEST_F(DeformableConvolutionV8StaticShapeInferenceTest, mask_channel_dimension_n op = make_op(data, offsets, filters, strides, pads_begin, pads_end, dilations, auto_pad, 4, 2); - input_shapes = ShapeVector{{1, 4, 5, 5}, {1, 36, 5, 5}, {4, 1, 3, 3}, {1, 17, 5, 5}}; + input_shapes = StaticShapeVector{{1, 4, 5, 5}, {1, 36, 5, 5}, {4, 1, 3, 3}, {1, 17, 5, 5}}; OV_EXPECT_THROW( shape_inference(op.get(), input_shapes), diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/einsum_shape_infernce_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/einsum_shape_infernce_test.cpp index 6113fdf8e13f72..53f7a036880baa 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/einsum_shape_infernce_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/einsum_shape_infernce_test.cpp @@ -16,7 +16,7 @@ TEST_F(EinsumStaticShapeInferenceTest, dot_product) { auto inputs = OutputVector(2, std::make_shared(element::f32, ov::PartialShape::dynamic())); auto op = make_op(inputs, "i,i->"); - output_shapes = shape_inference(op.get(), ShapeVector{{3}, {3}}); + output_shapes = shape_inference(op.get(), StaticShapeVector{{3}, {3}}); EXPECT_THAT(output_shapes, ElementsAre(StaticShape{})); } @@ -24,7 +24,7 @@ TEST_F(EinsumStaticShapeInferenceTest, matmul) { auto inputs = OutputVector(2, std::make_shared(element::f32, ov::PartialShape::dynamic())); auto op = make_op(inputs, "ab,bc->ac"); - output_shapes = shape_inference(op.get(), ShapeVector{{2, 3}, {3, 4}}); + output_shapes = shape_inference(op.get(), StaticShapeVector{{2, 3}, {3, 4}}); EXPECT_THAT(output_shapes, ElementsAre(StaticShape{2, 4})); } @@ -32,7 +32,7 @@ TEST_F(EinsumStaticShapeInferenceTest, trace) { auto I1 
= std::make_shared(element::f32, ov::PartialShape::dynamic()); auto op = make_op(OutputVector{I1}, "kii->k"); - output_shapes = shape_inference(op.get(), ShapeVector{{2, 3, 3}}); + output_shapes = shape_inference(op.get(), StaticShapeVector{{2, 3, 3}}); EXPECT_THAT(output_shapes, ElementsAre(StaticShape{2})); } @@ -40,7 +40,7 @@ TEST_F(EinsumStaticShapeInferenceTest, transpose) { auto I1 = std::make_shared(element::f32, ov::PartialShape::dynamic()); auto op = make_op(OutputVector{I1}, "ijk->kij"); - output_shapes = shape_inference(op.get(), ShapeVector{{1, 2, 3}}); + output_shapes = shape_inference(op.get(), StaticShapeVector{{1, 2, 3}}); EXPECT_THAT(output_shapes, ElementsAre(StaticShape{3, 1, 2})); } @@ -48,6 +48,6 @@ TEST_F(EinsumStaticShapeInferenceTest, multi_matmul) { auto inputs = OutputVector(3, std::make_shared(element::i32, ov::PartialShape::dynamic())); auto op = make_op(inputs, "ab,bcd,bc->ca"); - output_shapes = shape_inference(op.get(), ShapeVector{{2, 5}, {5, 3, 6}, {5, 3}}); + output_shapes = shape_inference(op.get(), StaticShapeVector{{2, 5}, {5, 3, 6}, {5, 3}}); EXPECT_THAT(output_shapes, ElementsAre(StaticShape{3, 2})); } diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/embedding_segments_sum_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/embedding_segments_sum_test.cpp index e33c26a7a01b5a..dc1b2535412a8d 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/embedding_segments_sum_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/embedding_segments_sum_test.cpp @@ -26,7 +26,7 @@ TEST_F(EmbeddingSegmentsSumV3StaticShapeInferenceTest, default_ctor) { input_shapes = {StaticShape{5, 2, 6}, StaticShape{4}, StaticShape{4}, StaticShape{}, StaticShape{}, StaticShape{4}}; int64_t num_segments = 4; - const auto const_map = std::unordered_map{{3, {element::i64, Shape{}, &num_segments}}}; + const auto const_map = std::unordered_map{{3, {element::i64, ov::Shape{}, &num_segments}}}; output_shapes = shape_inference(op.get(), input_shapes, const_map); EXPECT_EQ(output_shapes[0], (StaticShape{4, 2, 6})); @@ -58,7 +58,7 @@ TEST_F(EmbeddingSegmentsSumV3StaticShapeInferenceTest, constant_map) { input_shapes = {StaticShape{5, 2, 6}, StaticShape{4}, StaticShape{4}, StaticShape{}, StaticShape{}, StaticShape{4}}; int64_t num_segm_val = 3; - const auto const_map = std::unordered_map{{3, {element::i64, Shape{}, &num_segm_val}}}; + const auto const_map = std::unordered_map{{3, {element::i64, ov::Shape{}, &num_segm_val}}}; output_shapes = shape_inference(op.get(), input_shapes, const_map); EXPECT_EQ(output_shapes[0], (StaticShape{3, 2, 6})); @@ -74,11 +74,11 @@ TEST_F(EmbeddingSegmentsSumV3StaticShapeInferenceTest, basic) { auto op = make_op(emb_table, indices, segment_ids, num_segments, default_index, per_sample_weights); - output_shapes = shape_inference(op.get(), ShapeVector{{5, 2}, {4}, {4}, {}, {}, {4}}); + output_shapes = shape_inference(op.get(), StaticShapeVector{{5, 2}, {4}, {4}, {}, {}, {4}}); EXPECT_THAT(output_shapes, ElementsAre(StaticShape{3, 2})); int64_t num_segm_val = 8; - const auto const_map = std::unordered_map{{3, {element::i64, Shape{}, &num_segm_val}}}; - output_shapes = shape_inference(op.get(), ShapeVector{{5, 2}, {4}, {4}, {}, {}, {4}}, const_map); + const auto const_map = std::unordered_map{{3, {element::i64, ov::Shape{}, &num_segm_val}}}; + output_shapes = shape_inference(op.get(), StaticShapeVector{{5, 2}, {4}, {4}, {}, {}, {4}}, const_map); EXPECT_THAT(output_shapes, ElementsAre(StaticShape{8, 2})); } 
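Note on the recurring pattern in the hunks above and below: each test feeds one static shape per input to shape_inference() through the StaticShapeVector alias (the rename from ShapeVector that this diff applies everywhere) and compares the result against StaticShape values. The following is a minimal sketch of that pattern only, written against the intel_cpu shape-inference test helpers visible in this diff (StaticShapeVector, StaticShape, shape_inference); the include paths and test name are assumptions, not part of the change set.

#include <gtest/gtest.h>

#include "openvino/op/concat.hpp"
#include "openvino/op/parameter.hpp"
#include "utils.hpp"  // assumed path: local intel_cpu shape_inference_test helpers

using namespace ov;
using namespace ov::intel_cpu;

TEST(StaticShapeVectorExample, concat_two_dynamic_inputs) {
    // Two dynamic-rank inputs; the concrete shapes are supplied only at shape-inference time.
    const auto param = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());
    const auto concat = std::make_shared<op::v0::Concat>(NodeVector(2, param), -3);

    // StaticShapeVector holds one static shape per input (the alias this diff switches the tests to).
    const auto input_shapes = StaticShapeVector{{4, 2, 1}, {4, 2, 1}};
    const auto output_shapes = shape_inference(concat.get(), input_shapes);

    // Concatenation along axis -3 (axis 0 for rank-3 inputs) doubles the first dimension.
    ASSERT_EQ(output_shapes.size(), 1);
    EXPECT_EQ(output_shapes.front(), StaticShape({8, 2, 1}));
}

Tests that also need constant input values (for example the BatchToSpace, Col2Im, and Eye hunks) pass them as an unordered map of ov::Tensor objects, either directly to shape_inference() or through make_tensor_accessor(), as shown in those hunks.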
diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/experimental_detectron_detection_output_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/experimental_detectron_detection_output_shape_inference_test.cpp index f2f29ed015af55..9e36b3f4c80ef7 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/experimental_detectron_detection_output_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/experimental_detectron_detection_output_shape_inference_test.cpp @@ -29,10 +29,10 @@ TEST_F(ExperimentalDetectronDetectionOutputV6StaticShapeInferenceTest, default_c op = make_op(); op->set_attrs({.05f, .5f, 4.1352f, 12, 20, 7, false, {10.0f, 10.0f, 5.0f, 5.0f}}); - input_shapes = ShapeVector{{10, 4}, {10, 48}, {10, 12}, {1, 3}}; + input_shapes = StaticShapeVector{{10, 4}, {10, 48}, {10, 12}, {1, 3}}; output_shapes = shape_inference(op.get(), input_shapes); - EXPECT_EQ(output_shapes, ShapeVector({{7, 4}, {7}, {7}})); + EXPECT_EQ(output_shapes, StaticShapeVector({{7, 4}, {7}, {7}})); } TEST_F(ExperimentalDetectronDetectionOutputV6StaticShapeInferenceTest, inputs_dynamic_rank) { @@ -42,10 +42,10 @@ TEST_F(ExperimentalDetectronDetectionOutputV6StaticShapeInferenceTest, inputs_dy const auto im_info = std::make_shared(element::f32, PartialShape::dynamic()); op = make_op(rois, deltas, scores, im_info, make_attrs()); - input_shapes = ShapeVector{{10, 4}, {10, 40}, {10, 10}, {1, 3}}; + input_shapes = StaticShapeVector{{10, 4}, {10, 40}, {10, 10}, {1, 3}}; output_shapes = shape_inference(op.get(), input_shapes); - EXPECT_EQ(output_shapes, ShapeVector({{5, 4}, {5}, {5}})); + EXPECT_EQ(output_shapes, StaticShapeVector({{5, 4}, {5}, {5}})); } TEST_F(ExperimentalDetectronDetectionOutputV6StaticShapeInferenceTest, inputs_static_rank) { @@ -55,10 +55,10 @@ TEST_F(ExperimentalDetectronDetectionOutputV6StaticShapeInferenceTest, inputs_st const auto im_info = std::make_shared(element::f32, PartialShape::dynamic(2)); op = make_op(rois, deltas, scores, im_info, make_attrs()); - input_shapes = ShapeVector{{10, 4}, {10, 40}, {10, 10}, {1, 3}}; + input_shapes = StaticShapeVector{{10, 4}, {10, 40}, {10, 10}, {1, 3}}; output_shapes = shape_inference(op.get(), input_shapes); - EXPECT_EQ(output_shapes, ShapeVector({{5, 4}, {5}, {5}})); + EXPECT_EQ(output_shapes, StaticShapeVector({{5, 4}, {5}, {5}})); } TEST_F(ExperimentalDetectronDetectionOutputV6StaticShapeInferenceTest, im_info_bad_dimension) { @@ -68,7 +68,7 @@ TEST_F(ExperimentalDetectronDetectionOutputV6StaticShapeInferenceTest, im_info_b const auto im_info = std::make_shared(element::f32, PartialShape::dynamic()); op = make_op(rois, deltas, scores, im_info, make_attrs()); - input_shapes = ShapeVector{{10, 4}, {10, 40}, {10, 10}, {3}}; + input_shapes = StaticShapeVector{{10, 4}, {10, 40}, {10, 10}, {3}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes), NodeValidationFailure, HasSubstr("Input image info shape must be compatible with [1,3]")); @@ -81,7 +81,7 @@ TEST_F(ExperimentalDetectronDetectionOutputV6StaticShapeInferenceTest, deltas_no const auto im_info = std::make_shared(element::f32, PartialShape::dynamic()); op = make_op(rois, deltas, scores, im_info, make_attrs()); - input_shapes = ShapeVector{{10, 4}, {10, 40, 1}, {10, 10}, {1, 3}}; + input_shapes = StaticShapeVector{{10, 4}, {10, 40, 1}, {10, 10}, {1, 3}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes), NodeValidationFailure, HasSubstr("Input deltas rank must be equal to 2")); @@ -94,7 +94,7 @@ 
TEST_F(ExperimentalDetectronDetectionOutputV6StaticShapeInferenceTest, rois_1st_ const auto im_info = std::make_shared(element::f32, PartialShape::dynamic(2)); op = make_op(rois, deltas, scores, im_info, make_attrs()); - input_shapes = ShapeVector{{9, 4}, {10, 40}, {10, 10}, {1, 3}}; + input_shapes = StaticShapeVector{{9, 4}, {10, 40}, {10, 10}, {1, 3}}; OV_EXPECT_THROW( shape_inference(op.get(), input_shapes), NodeValidationFailure, diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/experimental_detectron_generate_proposal_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/experimental_detectron_generate_proposal_shape_inference_test.cpp index 93659ce27ebd63..d62394d2f433a0 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/experimental_detectron_generate_proposal_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/experimental_detectron_generate_proposal_shape_inference_test.cpp @@ -28,10 +28,10 @@ TEST_F(ExperimentalDetectronGenerateProposalsSingleImageV6StaticShapeInferenceTe op = make_op(); op->set_attrs({0.0f, 0.0f, 100, 0}); - input_shapes = ShapeVector{{3}, {12, 4}, {3, 12, 15}, {5, 12, 15}}; + input_shapes = StaticShapeVector{{3}, {12, 4}, {3, 12, 15}, {5, 12, 15}}; output_shapes = shape_inference(op.get(), input_shapes); - EXPECT_EQ(output_shapes, ShapeVector({{100, 4}, {100}})); + EXPECT_EQ(output_shapes, StaticShapeVector({{100, 4}, {100}})); } TEST_F(ExperimentalDetectronGenerateProposalsSingleImageV6StaticShapeInferenceTest, inputs_dynamic_rank) { @@ -41,10 +41,10 @@ TEST_F(ExperimentalDetectronGenerateProposalsSingleImageV6StaticShapeInferenceTe const auto scores = std::make_shared(element::f16, PartialShape::dynamic()); op = make_op(im_info, anchors, deltas, scores, make_attrs(100)); - input_shapes = ShapeVector{{3}, {12, 4}, {3, 12, 15}, {5, 12, 15}}; + input_shapes = StaticShapeVector{{3}, {12, 4}, {3, 12, 15}, {5, 12, 15}}; output_shapes = shape_inference(op.get(), input_shapes); - EXPECT_EQ(output_shapes, ShapeVector({{100, 4}, {100}})); + EXPECT_EQ(output_shapes, StaticShapeVector({{100, 4}, {100}})); } TEST_F(ExperimentalDetectronGenerateProposalsSingleImageV6StaticShapeInferenceTest, inputs_static_rank) { @@ -54,10 +54,10 @@ TEST_F(ExperimentalDetectronGenerateProposalsSingleImageV6StaticShapeInferenceTe const auto scores = std::make_shared(element::f16, PartialShape::dynamic(3)); op = make_op(im_info, anchors, deltas, scores, make_attrs(1000)); - input_shapes = ShapeVector{{3}, {12, 4}, {3, 120, 15}, {5, 120, 15}}; + input_shapes = StaticShapeVector{{3}, {12, 4}, {3, 120, 15}, {5, 120, 15}}; output_shapes = shape_inference(op.get(), input_shapes); - EXPECT_EQ(output_shapes, ShapeVector({{1000, 4}, {1000}})); + EXPECT_EQ(output_shapes, StaticShapeVector({{1000, 4}, {1000}})); } TEST_F(ExperimentalDetectronGenerateProposalsSingleImageV6StaticShapeInferenceTest, im_info_bad_dimension) { @@ -67,7 +67,7 @@ TEST_F(ExperimentalDetectronGenerateProposalsSingleImageV6StaticShapeInferenceTe const auto scores = std::make_shared(element::f32, PartialShape::dynamic(3)); op = make_op(im_info, anchors, deltas, scores, make_attrs(40)); - input_shapes = ShapeVector{{4}, {12, 4}, {3, 120, 15}, {5, 120, 15}}; + input_shapes = StaticShapeVector{{4}, {12, 4}, {3, 120, 15}, {5, 120, 15}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes), NodeValidationFailure, HasSubstr("The 'input_im_info' shape is expected to be a compatible with [3]")); @@ -80,7 +80,7 @@ 
TEST_F(ExperimentalDetectronGenerateProposalsSingleImageV6StaticShapeInferenceTe const auto scores = std::make_shared(element::f32, PartialShape::dynamic(3)); op = make_op(im_info, anchors, deltas, scores, make_attrs(40)); - input_shapes = ShapeVector{{3}, {12, 4}, {3, 120, 15, 1}, {5, 120, 15}}; + input_shapes = StaticShapeVector{{3}, {12, 4}, {3, 120, 15, 1}, {5, 120, 15}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes), NodeValidationFailure, HasSubstr("The 'input_deltas' input is expected to be a 3D")); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/experimental_detectron_prior_grid_generator_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/experimental_detectron_prior_grid_generator_shape_inference_test.cpp index e3d11b0a16e799..fffd24c4807845 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/experimental_detectron_prior_grid_generator_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/experimental_detectron_prior_grid_generator_shape_inference_test.cpp @@ -29,10 +29,10 @@ TEST_F(ExperimentalDetectronPriorGridGeneratorV6StaticShapeInferenceTest, defaul op = make_op(); op->set_attrs({true, 0, 0, 5.0f, 5.0f}); - input_shapes = ShapeVector{{3, 4}, {1, 5, 7, 2}, {1, 5, 50, 50}}; + input_shapes = StaticShapeVector{{3, 4}, {1, 5, 7, 2}, {1, 5, 50, 50}}; output_shapes = shape_inference(op.get(), input_shapes); - EXPECT_EQ(output_shapes, ShapeVector({{42, 4}})); + EXPECT_EQ(output_shapes, StaticShapeVector({{42, 4}})); } TEST_F(ExperimentalDetectronPriorGridGeneratorV6StaticShapeInferenceTest, inputs_dynamic_rank) { @@ -41,7 +41,7 @@ TEST_F(ExperimentalDetectronPriorGridGeneratorV6StaticShapeInferenceTest, inputs const auto im_data = std::make_shared(element::f32, PartialShape::dynamic()); op = make_op(priors, feat_map, im_data, make_attrs(false)); - input_shapes = ShapeVector{{10, 4}, {1, 2, 4, 5}, {1, 2, 100, 100}}; + input_shapes = StaticShapeVector{{10, 4}, {1, 2, 4, 5}, {1, 2, 100, 100}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -54,7 +54,7 @@ TEST_F(ExperimentalDetectronPriorGridGeneratorV6StaticShapeInferenceTest, inputs const auto im_data = std::make_shared(element::f32, PartialShape::dynamic(4)); op = make_op(priors, feat_map, im_data, make_attrs(true)); - input_shapes = ShapeVector{{10, 4}, {1, 2, 4, 5}, {1, 2, 100, 100}}; + input_shapes = StaticShapeVector{{10, 4}, {1, 2, 4, 5}, {1, 2, 100, 100}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -67,7 +67,7 @@ TEST_F(ExperimentalDetectronPriorGridGeneratorV6StaticShapeInferenceTest, feat_m const auto im_data = std::make_shared(element::f32, PartialShape::dynamic()); op = make_op(priors, feat_map, im_data, make_attrs(true)); - input_shapes = ShapeVector{{10, 4}, {1, 2, 4, 5, 1}, {1, 2, 100, 100}}; + input_shapes = StaticShapeVector{{10, 4}, {1, 2, 4, 5, 1}, {1, 2, 100, 100}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes), NodeValidationFailure, HasSubstr("Feature_map rank must be equal to 4")); @@ -79,7 +79,7 @@ TEST_F(ExperimentalDetectronPriorGridGeneratorV6StaticShapeInferenceTest, priors const auto im_data = std::make_shared(element::f32, PartialShape::dynamic(4)); op = make_op(priors, feat_map, im_data, make_attrs(true)); - input_shapes = ShapeVector{{10, 5}, {1, 2, 4, 5}, {1, 2, 100, 100}}; + input_shapes = StaticShapeVector{{10, 5}, {1, 2, 4, 5}, {1, 2, 100, 100}}; OV_EXPECT_THROW(shape_inference(op.get(), 
input_shapes), NodeValidationFailure, HasSubstr("The last dimension of the 'priors' input must be equal to 4")); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/experimental_detectron_roi_feature_extractor_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/experimental_detectron_roi_feature_extractor_shape_inference_test.cpp index e9e29e0a481f2d..1d96c7363d2796 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/experimental_detectron_roi_feature_extractor_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/experimental_detectron_roi_feature_extractor_shape_inference_test.cpp @@ -27,7 +27,7 @@ TEST_F(ExperimentalDetectronROIFeatureExtractorV6StaticShapeInferenceTest, defau op = make_op(); op->set_attrs(make_attrs(16)); - input_shapes = ShapeVector{{1000, 4}, {1, 5, 8, 8}, {1, 5, 16, 16}, {1, 5, 64, 64}}; + input_shapes = StaticShapeVector{{1000, 4}, {1, 5, 8, 8}, {1, 5, 16, 16}, {1, 5, 64, 64}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_THAT(output_shapes, ElementsAre(StaticShape{1000, 5, 16, 16}, StaticShape{1000, 4})); @@ -39,7 +39,7 @@ TEST_F(ExperimentalDetectronROIFeatureExtractorV6StaticShapeInferenceTest, input const auto layer_1 = std::make_shared(element::f16, PartialShape::dynamic()); op = make_op(OutputVector{rois, layer_0, layer_1}, make_attrs(100)); - input_shapes = ShapeVector{{25, 4}, {1, 2, 100, 100}, {1, 2, 20, 300}}; + input_shapes = StaticShapeVector{{25, 4}, {1, 2, 100, 100}, {1, 2, 20, 300}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_THAT(output_shapes, ElementsAre(StaticShape{25, 2, 100, 100}, StaticShape{25, 4})); @@ -53,7 +53,7 @@ TEST_F(ExperimentalDetectronROIFeatureExtractorV6StaticShapeInferenceTest, input const auto layer_3 = std::make_shared(element::f16, PartialShape::dynamic(4)); op = make_op(OutputVector{rois, layer_0, layer_1, layer_2, layer_3}, make_attrs(15)); - input_shapes = ShapeVector{{25, 4}, {1, 2, 100, 100}, {1, 2, 20, 300}, {1, 2, 30, 30}, {1, 2, 200, 50}}; + input_shapes = StaticShapeVector{{25, 4}, {1, 2, 100, 100}, {1, 2, 20, 300}, {1, 2, 30, 30}, {1, 2, 200, 50}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_THAT(output_shapes, ElementsAre(StaticShape{25, 2, 15, 15}, StaticShape{25, 4})); @@ -66,7 +66,7 @@ TEST_F(ExperimentalDetectronROIFeatureExtractorV6StaticShapeInferenceTest, rois_ const auto layer_2 = std::make_shared(element::f16, PartialShape::dynamic(4)); op = make_op(OutputVector{rois, layer_0, layer_1, layer_2}, make_attrs(15)); - input_shapes = ShapeVector{{25, 4, 1}, {1, 2, 20, 300}, {1, 2, 30, 30}, {1, 2, 200, 50}}; + input_shapes = StaticShapeVector{{25, 4, 1}, {1, 2, 20, 300}, {1, 2, 30, 30}, {1, 2, 200, 50}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes), NodeValidationFailure, HasSubstr("Input rois rank must be equal to 2")); @@ -79,7 +79,7 @@ TEST_F(ExperimentalDetectronROIFeatureExtractorV6StaticShapeInferenceTest, layer const auto layer_2 = std::make_shared(element::f16, PartialShape::dynamic()); op = make_op(OutputVector{rois, layer_0, layer_1, layer_2}, make_attrs(15)); - input_shapes = ShapeVector{{25, 4}, {1, 2, 20, 300}, {1, 2, 30, 30}, {1, 3, 200, 50}}; + input_shapes = StaticShapeVector{{25, 4}, {1, 2, 20, 300}, {1, 2, 30, 30}, {1, 3, 200, 50}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes), NodeValidationFailure, HasSubstr("The number of channels must be the same for all layers of the pyramid")); diff --git 
a/src/plugins/intel_cpu/tests/unit/shape_inference_test/experimental_detectron_topkrois_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/experimental_detectron_topkrois_shape_inference_test.cpp index d8f0949e972abb..0c532211ea9526 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/experimental_detectron_topkrois_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/experimental_detectron_topkrois_shape_inference_test.cpp @@ -24,7 +24,7 @@ TEST_F(ExperimentalDetectronTopKROIsV6StaticShapeInferenceTest, default_ctor) { op = make_op(); op->set_max_rois(100); - input_shapes = ShapeVector{{12, 4}, {12}}; + input_shapes = StaticShapeVector{{12, 4}, {12}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -36,7 +36,7 @@ TEST_F(ExperimentalDetectronTopKROIsV6StaticShapeInferenceTest, inputs_dynamic_r const auto rois_probs = std::make_shared(element::f32, PartialShape::dynamic()); op = make_op(input_rois, rois_probs, 5); - input_shapes = ShapeVector{{10, 4}, {10}}; + input_shapes = StaticShapeVector{{10, 4}, {10}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -48,7 +48,7 @@ TEST_F(ExperimentalDetectronTopKROIsV6StaticShapeInferenceTest, inputs_static_ra const auto rois_probs = std::make_shared(element::f32, PartialShape::dynamic(1)); op = make_op(input_rois, rois_probs, 15); - input_shapes = ShapeVector{{100, 4}, {100}}; + input_shapes = StaticShapeVector{{100, 4}, {100}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -60,7 +60,7 @@ TEST_F(ExperimentalDetectronTopKROIsV6StaticShapeInferenceTest, input_rois_not_2 const auto rois_probs = std::make_shared(element::f32, PartialShape::dynamic(1)); op = make_op(input_rois, rois_probs, 5); - input_shapes = ShapeVector{{10, 4, 10}, {10}}; + input_shapes = StaticShapeVector{{10, 4, 10}, {10}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes), NodeValidationFailure, @@ -72,7 +72,7 @@ TEST_F(ExperimentalDetectronTopKROIsV6StaticShapeInferenceTest, rois_prob_not_1d const auto rois_probs = std::make_shared(element::f32, PartialShape::dynamic()); op = make_op(input_rois, rois_probs, 5); - input_shapes = ShapeVector{{10, 4}, {10, 2}}; + input_shapes = StaticShapeVector{{10, 4}, {10, 2}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes), NodeValidationFailure, @@ -84,7 +84,7 @@ TEST_F(ExperimentalDetectronTopKROIsV6StaticShapeInferenceTest, input_rois_secon const auto rois_probs = std::make_shared(element::f32, PartialShape::dynamic(1)); op = make_op(input_rois, rois_probs, 5); - input_shapes = ShapeVector{{10, 5}, {10}}; + input_shapes = StaticShapeVector{{10, 5}, {10}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes), NodeValidationFailure, diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/extract_image_patches_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/extract_image_patches_shape_inference_test.cpp index 8eb775fc1af568..fa3f11792afaf5 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/extract_image_patches_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/extract_image_patches_shape_inference_test.cpp @@ -24,7 +24,7 @@ TEST_F(StaticShapeExtractImagePatchesV3Test, default_ctor_no_args) { op->set_rates({1, 1}); op->set_auto_pad(pad_type); - input_shapes = ShapeVector{{10, 8, 12, 6}}; + input_shapes = 
StaticShapeVector{{10, 8, 12, 6}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -35,7 +35,7 @@ TEST_F(StaticShapeExtractImagePatchesV3Test, data_input_is_dynamic_rank) { const auto data = std::make_shared(element::f32, ov::PartialShape::dynamic()); op = make_op(data, ov::Shape{3, 3}, ov::Strides{5, 5}, ov::Shape{2, 2}, op::PadType::VALID); - input_shapes = ShapeVector{{2, 2, 23, 24}}; + input_shapes = StaticShapeVector{{2, 2, 23, 24}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -46,7 +46,7 @@ TEST_F(StaticShapeExtractImagePatchesV3Test, data_input_is_static_rank) { const auto data = std::make_shared(element::f32, ov::PartialShape::dynamic(4)); op = make_op(data, ov::Shape{3, 3}, ov::Strides{5, 5}, ov::Shape{1, 1}, op::PadType::SAME_UPPER); - input_shapes = ShapeVector{{2, 2, 43, 34}}; + input_shapes = StaticShapeVector{{2, 2, 43, 34}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -57,7 +57,7 @@ TEST_F(StaticShapeExtractImagePatchesV3Test, data_shape_not_compatible_rank_4) { const auto data = std::make_shared(element::f32, ov::PartialShape::dynamic(4)); op = make_op(data, ov::Shape{3, 3}, ov::Strides{5, 5}, ov::Shape{1, 1}, op::PadType::SAME_UPPER); - OV_EXPECT_THROW(shape_inference(op.get(), ShapeVector{{2, 20, 12, 24, 1}}), + OV_EXPECT_THROW(shape_inference(op.get(), StaticShapeVector{{2, 20, 12, 24, 1}}), NodeValidationFailure, HasSubstr("input tensor must be 4D tensor")); } diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/eye_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/eye_shape_inference_test.cpp index d848d3c04d1582..b5ac02ca8f2984 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/eye_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/eye_shape_inference_test.cpp @@ -23,14 +23,14 @@ class EyeV9StaticShapeInferenceTest : public OpStaticShapeInferenceTestget_shape(), cols->get_shape(), diag->get_shape(), batch->get_shape()}; + input_shapes = StaticShapeVector{rows->get_shape(), cols->get_shape(), diag->get_shape(), batch->get_shape()}; const auto output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -47,11 +47,11 @@ TEST_F(EyeV9StaticShapeInferenceTest, parameters_in_const_data_map) { int32_t rows = 3, cols = 8; auto batch = std::array{2, 4, 1}; - const auto const_data = std::unordered_map{{0, {element::i32, Shape{}, &rows}}, - {1, {element::i32, Shape{1}, &cols}}, - {3, {element::i32, Shape{3}, batch.data()}}}; + const auto const_data = std::unordered_map{{0, {element::i32, ov::Shape{}, &rows}}, + {1, {element::i32, ov::Shape{1}, &cols}}, + {3, {element::i32, ov::Shape{3}, batch.data()}}}; - input_shapes = ShapeVector{{}, {1}, {1}, {3}}; + input_shapes = StaticShapeVector{{}, {1}, {1}, {3}}; const auto output_shapes = shape_inference(op.get(), input_shapes, const_data); EXPECT_EQ(output_shapes.size(), 1); @@ -69,11 +69,11 @@ TEST_F(EyeV9StaticShapeInferenceTest, assert_on_negative_rows) { int64_t rows = -3, cols = 8; auto batch = std::array{2, 4, 1}; const auto const_data = - std::unordered_map{{0, {element::i32, Shape{}, &rows}}, - {1, {element::i32, Shape{1}, &cols}}, - {3, {element::i32, Shape{batch.size()}, batch.data()}}}; + std::unordered_map{{0, {element::i32, ov::Shape{}, &rows}}, + {1, {element::i32, ov::Shape{1}, &cols}}, + {3, {element::i32, ov::Shape{batch.size()}, 
batch.data()}}}; - input_shapes = ShapeVector{{}, {1}, {1}, {3}}; + input_shapes = StaticShapeVector{{}, {1}, {1}, {3}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, const_data), AssertFailure, @@ -91,11 +91,11 @@ TEST_F(EyeV9StaticShapeInferenceTest, assert_on_negative_columns) { int64_t rows = 3, cols = -8; auto batch = std::array{2, 4, 1}; const auto const_data = - std::unordered_map{{0, {element::i32, Shape{}, &rows}}, - {1, {element::i32, Shape{1}, &cols}}, - {3, {element::i32, Shape{batch.size()}, batch.data()}}}; + std::unordered_map{{0, {element::i32, ov::Shape{}, &rows}}, + {1, {element::i32, ov::Shape{1}, &cols}}, + {3, {element::i32, ov::Shape{batch.size()}, batch.data()}}}; - input_shapes = ShapeVector{{}, {1}, {1}, {3}}; + input_shapes = StaticShapeVector{{}, {1}, {1}, {3}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, const_data), AssertFailure, @@ -114,11 +114,11 @@ TEST_F(EyeV9StaticShapeInferenceTest, assert_on_rows_not_1D) { auto rows = std::array{2, 1}; auto batch = std::array{2, 4, 1}; const auto const_data = - std::unordered_map{{0, {element::i32, Shape{rows.size()}, &rows}}, - {1, {element::i32, Shape{1}, &cols}}, - {3, {element::i32, Shape{batch.size()}, batch.data()}}}; + std::unordered_map{{0, {element::i32, ov::Shape{rows.size()}, &rows}}, + {1, {element::i32, ov::Shape{1}, &cols}}, + {3, {element::i32, ov::Shape{batch.size()}, batch.data()}}}; - input_shapes = ShapeVector{{}, {1}, {1}, {3}}; + input_shapes = StaticShapeVector{{}, {1}, {1}, {3}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, const_data), NodeValidationFailure, @@ -137,11 +137,11 @@ TEST_F(EyeV9StaticShapeInferenceTest, assert_on_columns_not_1D) { auto cols = std::array{2, 1}; auto batch = std::array{2, 4, 1}; const auto const_data = - std::unordered_map{{0, {element::i32, Shape{}, &rows}}, - {1, {element::i32, Shape{cols.size()}, &cols}}, - {3, {element::i32, Shape{batch.size()}, batch.data()}}}; + std::unordered_map{{0, {element::i32, ov::Shape{}, &rows}}, + {1, {element::i32, ov::Shape{cols.size()}, &cols}}, + {3, {element::i32, ov::Shape{batch.size()}, batch.data()}}}; - input_shapes = ShapeVector{{1}, {}, {1}, {3}}; + input_shapes = StaticShapeVector{{1}, {}, {1}, {3}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, const_data), NodeValidationFailure, @@ -159,11 +159,11 @@ TEST_F(EyeV9StaticShapeInferenceTest, assert_on_batch_shape_not_match_shape_in_c int64_t rows = 8, cols = 5; auto batch = std::array{2, 4, 1}; const auto const_data = - std::unordered_map{{0, {element::i32, Shape{}, &rows}}, - {1, {element::i32, Shape{}, &cols}}, - {3, {element::i32, Shape{batch.size()}, batch.data()}}}; + std::unordered_map{{0, {element::i32, ov::Shape{}, &rows}}, + {1, {element::i32, ov::Shape{}, &cols}}, + {3, {element::i32, ov::Shape{batch.size()}, batch.data()}}}; - input_shapes = ShapeVector{{}, {}, {}, {2}}; + input_shapes = StaticShapeVector{{}, {}, {}, {2}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, const_data), NodeValidationFailure, diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/fft_base_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/fft_base_shape_inference_test.cpp index f8f87e37f3e2c0..eb7ef1035a1af8 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/fft_base_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/fft_base_shape_inference_test.cpp @@ -26,8 +26,8 @@ static std::shared_ptr build_dft_signal() { static std::shared_ptr 
build_dft_constant() { auto input_shape = std::make_shared(element::f32, PartialShape{-1, -1, -1, -1}); - auto axes = std::make_shared(element::i32, Shape{2}, std::vector{1, 2}); - auto signal = std::make_shared(element::i32, Shape{2}, std::vector{512, 100}); + auto axes = std::make_shared(element::i32, ov::Shape{2}, std::vector{1, 2}); + auto signal = std::make_shared(element::i32, ov::Shape{2}, std::vector{512, 100}); auto DFT_signal = std::make_shared(input_shape, axes, signal); return DFT_signal; } @@ -161,8 +161,8 @@ TEST(StaticShapeInferenceTest, RDFTWithSignalSizes) { TEST(StaticShapeInferenceTest, RDFTWithConstAxesAndSignalSizes) { auto input_shape = std::make_shared(element::f32, PartialShape{-1, -1, -1, -1}); - auto axes = std::make_shared(element::i32, Shape{2}, std::vector{2, 3}); - auto signal = std::make_shared(element::i32, Shape{2}, std::vector{64, 64}); + auto axes = std::make_shared(element::i32, ov::Shape{2}, std::vector{2, 3}); + auto signal = std::make_shared(element::i32, ov::Shape{2}, std::vector{64, 64}); auto RDFT = std::make_shared(input_shape, axes, signal); std::vector static_input_shapes = {StaticShape{1, 120, 64, 64}, StaticShape{2}, StaticShape{2}}; diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/gather_nd_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/gather_nd_shape_inference_test.cpp index 79c715e775fd55..f3453adf3c1671 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/gather_nd_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/gather_nd_shape_inference_test.cpp @@ -16,7 +16,7 @@ using namespace ov::intel_cpu; using namespace testing; struct GatherNDTestParams { - ShapeVector input_shapes; + StaticShapeVector input_shapes; StaticShape exp_shape; size_t batch_dims; }; @@ -55,39 +55,39 @@ class StaticShapeInferenceGatherNDTest : public OpStaticShapeInferenceTest{ // Test: batch_dims = 0 - GatherNDTestParams{ShapeVector{{8}, {1}}, StaticShape{}, 0}, - GatherNDTestParams{ShapeVector{{8}, {1, 1}}, StaticShape{1}, 0}, - GatherNDTestParams{ShapeVector{{8}, {5, 1}}, StaticShape{5}, 0}, - GatherNDTestParams{ShapeVector{{8, 11}, {2}}, StaticShape{}, 0}, - GatherNDTestParams{ShapeVector{{8, 11}, {5, 2}}, StaticShape{5}, 0}, - GatherNDTestParams{ShapeVector{{8, 11, 12}, {2}}, StaticShape{12}, 0}, - GatherNDTestParams{ShapeVector{{8, 11, 12}, {5, 2}}, StaticShape{5, 12}, 0}, - GatherNDTestParams{ShapeVector{{8, 3, 11, 12}, {2}}, StaticShape{11, 12}, 0}, - GatherNDTestParams{ShapeVector{{8, 3, 11, 12}, {2, 1}}, StaticShape{2, 3, 11, 12}, 0}, - GatherNDTestParams{ShapeVector{{8, 3, 11, 12}, {2, 2}}, StaticShape{2, 11, 12}, 0}, - GatherNDTestParams{ShapeVector{{8, 3, 11, 12}, {2, 5, 4}}, StaticShape{2, 5}, 0}, - GatherNDTestParams{ShapeVector{{8, 3, 11, 12}, {2, 5, 20, 3}}, StaticShape{2, 5, 20, 12}, 0}, - GatherNDTestParams{ShapeVector{{8, 3, 11, 12}, {6, 4, 2}}, StaticShape{6, 4, 11, 12}, 0}, - GatherNDTestParams{ShapeVector{{8, 3, 11, 12}, {8, 4, 2}}, StaticShape{8, 4, 11, 12}, 0}, - GatherNDTestParams{ShapeVector{{7, 3, 11, 12}, {8, 6, 5, 4, 1}}, StaticShape{8, 6, 5, 4, 3, 11, 12}, 0}, - GatherNDTestParams{ShapeVector{{7, 3, 11, 12}, {8, 6, 5, 4, 2}}, StaticShape{8, 6, 5, 4, 11, 12}, 0}, - GatherNDTestParams{ShapeVector{{7, 3, 11, 12}, {8, 6, 5, 4, 3}}, StaticShape{8, 6, 5, 4, 12}, 0}, - GatherNDTestParams{ShapeVector{{7, 3, 11, 12}, {8, 6, 5, 4, 4}}, StaticShape{8, 6, 5, 4}, 0}, - GatherNDTestParams{ShapeVector{{7, 3, 11}, {8, 6, 5, 4, 1}}, StaticShape{8, 6, 5, 4, 
3, 11}, 0}, + GatherNDTestParams{StaticShapeVector{{8}, {1}}, StaticShape{}, 0}, + GatherNDTestParams{StaticShapeVector{{8}, {1, 1}}, StaticShape{1}, 0}, + GatherNDTestParams{StaticShapeVector{{8}, {5, 1}}, StaticShape{5}, 0}, + GatherNDTestParams{StaticShapeVector{{8, 11}, {2}}, StaticShape{}, 0}, + GatherNDTestParams{StaticShapeVector{{8, 11}, {5, 2}}, StaticShape{5}, 0}, + GatherNDTestParams{StaticShapeVector{{8, 11, 12}, {2}}, StaticShape{12}, 0}, + GatherNDTestParams{StaticShapeVector{{8, 11, 12}, {5, 2}}, StaticShape{5, 12}, 0}, + GatherNDTestParams{StaticShapeVector{{8, 3, 11, 12}, {2}}, StaticShape{11, 12}, 0}, + GatherNDTestParams{StaticShapeVector{{8, 3, 11, 12}, {2, 1}}, StaticShape{2, 3, 11, 12}, 0}, + GatherNDTestParams{StaticShapeVector{{8, 3, 11, 12}, {2, 2}}, StaticShape{2, 11, 12}, 0}, + GatherNDTestParams{StaticShapeVector{{8, 3, 11, 12}, {2, 5, 4}}, StaticShape{2, 5}, 0}, + GatherNDTestParams{StaticShapeVector{{8, 3, 11, 12}, {2, 5, 20, 3}}, StaticShape{2, 5, 20, 12}, 0}, + GatherNDTestParams{StaticShapeVector{{8, 3, 11, 12}, {6, 4, 2}}, StaticShape{6, 4, 11, 12}, 0}, + GatherNDTestParams{StaticShapeVector{{8, 3, 11, 12}, {8, 4, 2}}, StaticShape{8, 4, 11, 12}, 0}, + GatherNDTestParams{StaticShapeVector{{7, 3, 11, 12}, {8, 6, 5, 4, 1}}, StaticShape{8, 6, 5, 4, 3, 11, 12}, 0}, + GatherNDTestParams{StaticShapeVector{{7, 3, 11, 12}, {8, 6, 5, 4, 2}}, StaticShape{8, 6, 5, 4, 11, 12}, 0}, + GatherNDTestParams{StaticShapeVector{{7, 3, 11, 12}, {8, 6, 5, 4, 3}}, StaticShape{8, 6, 5, 4, 12}, 0}, + GatherNDTestParams{StaticShapeVector{{7, 3, 11, 12}, {8, 6, 5, 4, 4}}, StaticShape{8, 6, 5, 4}, 0}, + GatherNDTestParams{StaticShapeVector{{7, 3, 11}, {8, 6, 5, 4, 1}}, StaticShape{8, 6, 5, 4, 3, 11}, 0}, // Test: batch_dims = 1 - GatherNDTestParams{ShapeVector{{8, 11}, {8, 1}}, StaticShape{8}, 1}, - GatherNDTestParams{ShapeVector{{8, 11, 12}, {8, 1}}, StaticShape{8, 12}, 1}, - GatherNDTestParams{ShapeVector{{8, 11, 12}, {8, 2}}, StaticShape{8}, 1}, - GatherNDTestParams{ShapeVector{{8, 11, 12}, {8, 5, 1}}, StaticShape{8, 5, 12}, 1}, - GatherNDTestParams{ShapeVector{{8, 11, 12}, {8, 5, 2}}, StaticShape{8, 5}, 1}, - GatherNDTestParams{ShapeVector{{8, 3, 11, 12}, {8, 2}}, StaticShape{8, 12}, 1}, - GatherNDTestParams{ShapeVector{{8, 3, 11, 12}, {8, 2, 1}}, StaticShape{8, 2, 11, 12}, 1}, - GatherNDTestParams{ShapeVector{{8, 3, 11, 12}, {8, 5, 2}}, StaticShape{8, 5, 12}, 1}, - GatherNDTestParams{ShapeVector{{8, 3, 11, 12}, {8, 5, 3}}, StaticShape{8, 5}, 1}, - GatherNDTestParams{ShapeVector{{8, 3, 11, 12}, {8, 7, 4, 2}}, StaticShape{8, 7, 4, 12}, 1}, - GatherNDTestParams{ShapeVector{{7, 3, 11, 12}, {7, 6, 5, 4, 1}}, StaticShape{7, 6, 5, 4, 11, 12}, 1}, - GatherNDTestParams{ShapeVector{{7, 3, 11, 12}, {7, 6, 5, 4, 2}}, StaticShape{7, 6, 5, 4, 12}, 1}, - GatherNDTestParams{ShapeVector{{7, 3, 11, 12}, {7, 6, 5, 4, 3}}, StaticShape{7, 6, 5, 4}, 1}}; + GatherNDTestParams{StaticShapeVector{{8, 11}, {8, 1}}, StaticShape{8}, 1}, + GatherNDTestParams{StaticShapeVector{{8, 11, 12}, {8, 1}}, StaticShape{8, 12}, 1}, + GatherNDTestParams{StaticShapeVector{{8, 11, 12}, {8, 2}}, StaticShape{8}, 1}, + GatherNDTestParams{StaticShapeVector{{8, 11, 12}, {8, 5, 1}}, StaticShape{8, 5, 12}, 1}, + GatherNDTestParams{StaticShapeVector{{8, 11, 12}, {8, 5, 2}}, StaticShape{8, 5}, 1}, + GatherNDTestParams{StaticShapeVector{{8, 3, 11, 12}, {8, 2}}, StaticShape{8, 12}, 1}, + GatherNDTestParams{StaticShapeVector{{8, 3, 11, 12}, {8, 2, 1}}, StaticShape{8, 2, 11, 12}, 1}, + GatherNDTestParams{StaticShapeVector{{8, 3, 11, 
12}, {8, 5, 2}}, StaticShape{8, 5, 12}, 1}, + GatherNDTestParams{StaticShapeVector{{8, 3, 11, 12}, {8, 5, 3}}, StaticShape{8, 5}, 1}, + GatherNDTestParams{StaticShapeVector{{8, 3, 11, 12}, {8, 7, 4, 2}}, StaticShape{8, 7, 4, 12}, 1}, + GatherNDTestParams{StaticShapeVector{{7, 3, 11, 12}, {7, 6, 5, 4, 1}}, StaticShape{7, 6, 5, 4, 11, 12}, 1}, + GatherNDTestParams{StaticShapeVector{{7, 3, 11, 12}, {7, 6, 5, 4, 2}}, StaticShape{7, 6, 5, 4, 12}, 1}, + GatherNDTestParams{StaticShapeVector{{7, 3, 11, 12}, {7, 6, 5, 4, 3}}, StaticShape{7, 6, 5, 4}, 1}}; TYPED_TEST_SUITE_P(StaticShapeInferenceGatherNDTest); @@ -101,8 +101,8 @@ TYPED_TEST_P(StaticShapeInferenceGatherNDTest, gather_nd_common_default_ctor) { auto op = std::make_shared(); op->set_batch_dims(1); - ShapeVector input_shapes{{8, 3, 11, 12}, {8, 5, 2}}; - ShapeVector output_shapes(1); + StaticShapeVector input_shapes{{8, 3, 11, 12}, {8, 5, 2}}; + StaticShapeVector output_shapes(1); output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes[0], (StaticShape{8, 5, 12})); @@ -124,13 +124,14 @@ TEST_P(StaticShapeInferenceGatherNDV5Test, gather_nd_v5_test) { INSTANTIATE_TEST_SUITE_P( shape_infer, StaticShapeInferenceGatherNDV5Test, - ::testing::Values(GatherNDTestParams{ShapeVector{{6, 4, 11, 12, 13}, {6, 4, 2}}, StaticShape{24, 13}, 2}, - GatherNDTestParams{ShapeVector{{6, 4, 11, 12, 13}, {6, 4, 5, 7, 2}}, StaticShape{24, 5, 7, 13}, 2}, - GatherNDTestParams{ShapeVector{{6, 4, 11, 12, 13}, {6, 4, 3}}, StaticShape{24}, 2}, - GatherNDTestParams{ShapeVector{{6, 4, 11, 12, 13}, {6, 4, 5, 3}}, StaticShape{24, 5}, 2}, - GatherNDTestParams{ShapeVector{{6, 4, 1, 12, 13}, {6, 4, 1, 1}}, StaticShape{24, 13}, 3}, - GatherNDTestParams{ShapeVector{{6, 4, 1, 12, 13}, {6, 4, 1, 2}}, StaticShape{24}, 3}, - GatherNDTestParams{ShapeVector{{6, 4, 1, 12, 13}, {6, 4, 1, 5, 2}}, StaticShape{24, 5}, 3}), + ::testing::Values( + GatherNDTestParams{StaticShapeVector{{6, 4, 11, 12, 13}, {6, 4, 2}}, StaticShape{24, 13}, 2}, + GatherNDTestParams{StaticShapeVector{{6, 4, 11, 12, 13}, {6, 4, 5, 7, 2}}, StaticShape{24, 5, 7, 13}, 2}, + GatherNDTestParams{StaticShapeVector{{6, 4, 11, 12, 13}, {6, 4, 3}}, StaticShape{24}, 2}, + GatherNDTestParams{StaticShapeVector{{6, 4, 11, 12, 13}, {6, 4, 5, 3}}, StaticShape{24, 5}, 2}, + GatherNDTestParams{StaticShapeVector{{6, 4, 1, 12, 13}, {6, 4, 1, 1}}, StaticShape{24, 13}, 3}, + GatherNDTestParams{StaticShapeVector{{6, 4, 1, 12, 13}, {6, 4, 1, 2}}, StaticShape{24}, 3}, + GatherNDTestParams{StaticShapeVector{{6, 4, 1, 12, 13}, {6, 4, 1, 5, 2}}, StaticShape{24, 5}, 3}), print_params); // ------------------------------ V8 ------------------------------ @@ -143,11 +144,12 @@ TEST_P(StaticShapeInferenceGatherNDV8Test, gather_nd_v8_test) { INSTANTIATE_TEST_SUITE_P( shape_infer, StaticShapeInferenceGatherNDV8Test, - ::testing::Values(GatherNDTestParams{ShapeVector{{6, 4, 11, 12, 13}, {6, 4, 2}}, StaticShape{6, 4, 13}, 2}, - GatherNDTestParams{ShapeVector{{6, 4, 11, 12, 13}, {6, 4, 5, 7, 2}}, StaticShape{6, 4, 5, 7, 13}, 2}, - GatherNDTestParams{ShapeVector{{6, 4, 11, 12, 13}, {6, 4, 3}}, StaticShape{6, 4}, 2}, - GatherNDTestParams{ShapeVector{{6, 4, 11, 12, 13}, {6, 4, 5, 3}}, StaticShape{6, 4, 5}, 2}, - GatherNDTestParams{ShapeVector{{6, 4, 1, 12, 13}, {6, 4, 1, 1}}, StaticShape{6, 4, 1, 13}, 3}, - GatherNDTestParams{ShapeVector{{6, 4, 1, 12, 13}, {6, 4, 1, 2}}, StaticShape{6, 4, 1}, 3}, - GatherNDTestParams{ShapeVector{{6, 4, 1, 12, 13}, {6, 4, 1, 5, 2}}, StaticShape{6, 4, 1, 5}, 3}), + ::testing::Values( + 
GatherNDTestParams{StaticShapeVector{{6, 4, 11, 12, 13}, {6, 4, 2}}, StaticShape{6, 4, 13}, 2}, + GatherNDTestParams{StaticShapeVector{{6, 4, 11, 12, 13}, {6, 4, 5, 7, 2}}, StaticShape{6, 4, 5, 7, 13}, 2}, + GatherNDTestParams{StaticShapeVector{{6, 4, 11, 12, 13}, {6, 4, 3}}, StaticShape{6, 4}, 2}, + GatherNDTestParams{StaticShapeVector{{6, 4, 11, 12, 13}, {6, 4, 5, 3}}, StaticShape{6, 4, 5}, 2}, + GatherNDTestParams{StaticShapeVector{{6, 4, 1, 12, 13}, {6, 4, 1, 1}}, StaticShape{6, 4, 1, 13}, 3}, + GatherNDTestParams{StaticShapeVector{{6, 4, 1, 12, 13}, {6, 4, 1, 2}}, StaticShape{6, 4, 1}, 3}, + GatherNDTestParams{StaticShapeVector{{6, 4, 1, 12, 13}, {6, 4, 1, 5, 2}}, StaticShape{6, 4, 1, 5}, 3}), print_params); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/gather_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/gather_shape_inference_test.cpp index 56c1fdcec4b43a..30b0e54023cf3c 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/gather_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/gather_shape_inference_test.cpp @@ -15,23 +15,23 @@ using namespace ov; using namespace ov::intel_cpu; using namespace testing; -using TestParams = std::tuple; +using TestParams = std::tuple; template class StaticShapeInferenceGatherTest : public OpStaticShapeInferenceTest { protected: void SetUp() override { - OpStaticShapeInferenceTest::output_shapes = ShapeVector(1); + OpStaticShapeInferenceTest::output_shapes = StaticShapeVector(1); } - std::shared_ptr make_gather(const ShapeVector& shapes, const int32_t* const axis_val_ptr = nullptr) { + std::shared_ptr make_gather(const StaticShapeVector& shapes, const int32_t* const axis_val_ptr = nullptr) { const auto p_dims = std::vector(shapes[0].size(), -1); const auto i_dims = std::vector(shapes[1].size(), -1); auto param = std::make_shared(element::f32, PartialShape{p_dims}); auto indicies = std::make_shared(element::i32, PartialShape{i_dims}); if (axis_val_ptr) { - auto axis = op::v0::Constant::create(element::i32, Shape{}, {*axis_val_ptr}); + auto axis = op::v0::Constant::create(element::i32, ov::Shape{}, {*axis_val_ptr}); return this->make_op(param, indicies, axis); } else { auto axis = std::make_shared(element::i32, PartialShape{}); @@ -44,13 +44,13 @@ class StaticShapeInferenceGatherTest : public OpStaticShapeInferenceTest{make_tuple(0, ShapeVector{{3, 2}, {2, 2}, {1}}, StaticShape({2, 2, 2})), - make_tuple(1, ShapeVector{{3, 2}, {2, 2}, {1}}, StaticShape({3, 2, 2})), - make_tuple(-1, ShapeVector{{3, 2}, {2, 2}, {1}}, StaticShape({3, 2, 2})), - make_tuple(0, ShapeVector{{3, 2, 4}, {2, 1, 2}, {1}}, StaticShape({2, 1, 2, 2, 4})), - make_tuple(1, ShapeVector{{3, 2, 4}, {2, 1, 2}, {1}}, StaticShape({3, 2, 1, 2, 4})), - make_tuple(-1, ShapeVector{{3, 2, 4}, {2, 1, 2}, {}}, StaticShape({3, 2, 2, 1, 2})), - make_tuple(-2, ShapeVector{{3, 2, 4}, {2, 1, 2}, {}}, StaticShape({3, 2, 1, 2, 4}))}; + std::vector{make_tuple(0, StaticShapeVector{{3, 2}, {2, 2}, {1}}, StaticShape({2, 2, 2})), + make_tuple(1, StaticShapeVector{{3, 2}, {2, 2}, {1}}, StaticShape({3, 2, 2})), + make_tuple(-1, StaticShapeVector{{3, 2}, {2, 2}, {1}}, StaticShape({3, 2, 2})), + make_tuple(0, StaticShapeVector{{3, 2, 4}, {2, 1, 2}, {1}}, StaticShape({2, 1, 2, 2, 4})), + make_tuple(1, StaticShapeVector{{3, 2, 4}, {2, 1, 2}, {1}}, StaticShape({3, 2, 1, 2, 4})), + make_tuple(-1, StaticShapeVector{{3, 2, 4}, {2, 1, 2}, {}}, StaticShape({3, 2, 2, 1, 2})), + make_tuple(-2, StaticShapeVector{{3, 2, 4}, 
{2, 1, 2}, {}}, StaticShape({3, 2, 1, 2, 4}))}; TYPED_TEST_SUITE_P(StaticShapeInferenceGatherTest); @@ -73,7 +73,7 @@ TYPED_TEST_P(StaticShapeInferenceGatherTest, axis_in_const_map) { std::tie(this->axis_val, this->input_shapes, this->exp_shape) = params; auto op = this->make_gather(this->input_shapes); - auto axis_tensor = ov::Tensor(element::i32, Shape{1}, &this->axis_val); + auto axis_tensor = ov::Tensor(element::i32, ov::Shape{1}, &this->axis_val); this->output_shapes = shape_inference(op.get(), this->input_shapes, {{2, axis_tensor}}); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/gather_tree_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/gather_tree_shape_inference_test.cpp index b53fc6c483ee60..25a10053a7cb18 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/gather_tree_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/gather_tree_shape_inference_test.cpp @@ -18,7 +18,7 @@ TEST_F(GatherTreeStaticShapeInferenceTest, gather_tree) { auto step_ids = std::make_shared(element::f32, PartialShape{-1, -1, -1}); auto parent_idx = std::make_shared(element::f32, PartialShape{-1, -1, -1}); auto max_seq_len = std::make_shared(element::f32, PartialShape{-1}); - auto end_token = std::make_shared(element::f32, PartialShape{Shape{}}); + auto end_token = std::make_shared(element::f32, PartialShape{}); op = make_op(step_ids, parent_idx, max_seq_len, end_token); input_shapes = {StaticShape{1, 2, 3}, StaticShape{1, 2, 3}, StaticShape{2}, StaticShape{}}; diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/group_convolution_backprop_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/group_convolution_backprop_shape_inference_test.cpp index d048cc0598ccbc..16d15f84f7842d 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/group_convolution_backprop_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/group_convolution_backprop_shape_inference_test.cpp @@ -32,7 +32,7 @@ TEST_F(GroupConvolutionBackpropDataStaticShapeInferenceTest, default_ctor_with_o op->set_auto_pad(op::PadType::EXPLICIT); op->set_output_shape(spatial_shape.to_shape()); - input_shapes = ShapeVector{{1, 20, 224, 224}, {2, 10, 10, 3, 3}, {2}}; + input_shapes = StaticShapeVector{{1, 20, 224, 224}, {2, 10, 10, 3, 3}, {2}}; auto shape_infer = make_shape_inference(op); const auto input_shape_refs = make_static_shape_refs(input_shapes); output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor()); @@ -53,9 +53,9 @@ TEST_F(GroupConvolutionBackpropDataStaticShapeInferenceTest, default_ctor) { op->set_auto_pad(op::PadType::EXPLICIT); int32_t spatial_shape[] = {5, 10, 15}; - const auto const_data = std::unordered_map{{2, {element::i32, Shape{3}, spatial_shape}}}; + const auto const_data = std::unordered_map{{2, {element::i32, ov::Shape{3}, spatial_shape}}}; - input_shapes = ShapeVector{{1, 6, 10, 12, 2}, {3, 2, 2, 5, 5, 5}, {3}}; + input_shapes = StaticShapeVector{{1, 6, 10, 12, 2}, {3, 2, 2, 5, 5, 5}, {3}}; auto shape_infer = make_shape_inference(op); const auto input_shape_refs = make_static_shape_refs(input_shapes); output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor(const_data)); @@ -76,10 +76,10 @@ TEST_F(GroupConvolutionBackpropDataStaticShapeInferenceTest, default_ctor_more_i op->set_auto_pad(op::PadType::EXPLICIT); int32_t spatial_shape[] = {5, 10, 15}; - const auto const_data = std::unordered_map{{2, 
{element::i32, Shape{3}, spatial_shape}}}; + const auto const_data = std::unordered_map{{2, {element::i32, ov::Shape{3}, spatial_shape}}}; // More than three inputs can be provided, but not used - input_shapes = ShapeVector{{1, 6, 10, 12, 2}, {3, 2, 2, 5, 5, 5}, {3}, {0}}; + input_shapes = StaticShapeVector{{1, 6, 10, 12, 2}, {3, 2, 2, 5, 5, 5}, {3}, {0}}; auto shape_infer = make_shape_inference(op); const auto input_shape_refs = make_static_shape_refs(input_shapes); output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor(const_data)); @@ -102,7 +102,7 @@ TEST_F(GroupConvolutionBackpropDataStaticShapeInferenceTest, 2d_inputs_dynamic_r op = make_op(data, filters, strides, pads_begin, pads_end, dilations, auto_pad); - input_shapes = ShapeVector{{1, 2, 5, 5}, {2, 1, 2, 3, 3}}; + input_shapes = StaticShapeVector{{1, 2, 5, 5}, {2, 1, 2, 3, 3}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -118,11 +118,11 @@ TEST_F(GroupConvolutionBackpropDataStaticShapeInferenceTest, 3d_auto_pad_same_lo const auto data = std::make_shared(element::f32, PartialShape::dynamic(5)); const auto filters = std::make_shared(element::f32, PartialShape::dynamic(6)); - const auto out_spatial = op::v0::Constant::create(element::i64, Shape{3}, {2, 1, 3}); + const auto out_spatial = op::v0::Constant::create(element::i64, ov::Shape{3}, {2, 1, 3}); op = make_op(data, filters, out_spatial, strides, pads_begin, pads_end, dilations, auto_pad); - input_shapes = ShapeVector{{3, 6, 5, 5, 5}, {1, 6, 6, 3, 3, 3}, {3}}; + input_shapes = StaticShapeVector{{3, 6, 5, 5, 5}, {1, 6, 6, 3, 3, 3}, {3}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -142,9 +142,9 @@ TEST_F(GroupConvolutionBackpropDataStaticShapeInferenceTest, 3d_auto_pad_same_up op = make_op(data, filters, out_spatial, strides, pads_begin, pads_end, dilations, auto_pad); int32_t spatial_dims[] = {2, 6, 1}; - const auto const_data = std::unordered_map{{2, {element::i32, Shape{3}, spatial_dims}}}; + const auto const_data = std::unordered_map{{2, {element::i32, ov::Shape{3}, spatial_dims}}}; - input_shapes = ShapeVector{{3, 5, 5, 5, 5}, {1, 5, 1, 3, 3, 3}, {3}}; + input_shapes = StaticShapeVector{{3, 5, 5, 5, 5}, {1, 5, 1, 3, 3, 3}, {3}}; const auto output_shapes = shape_inference(op.get(), input_shapes, const_data); EXPECT_EQ(output_shapes.size(), 1); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/group_convolution_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/group_convolution_shape_inference_test.cpp index 60ccf3a3b74141..94d44026bf3ecc 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/group_convolution_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/group_convolution_shape_inference_test.cpp @@ -29,7 +29,7 @@ TEST_F(GroupConvolutionV1StaticShapeInferenceTest, default_ctor_direct_infer_cal auto pads_begin = CoordinateDiff{2, 2}; auto pads_end = CoordinateDiff{2, 1}; - input_shapes = ShapeVector{{1, 6, 10, 12}, {3, 2, 2, 5, 5}}; + input_shapes = StaticShapeVector{{1, 6, 10, 12}, {3, 2, 2, 5, 5}}; output_shapes = ov::op::v1::shape_infer(op.get(), input_shapes, pads_begin, pads_end); EXPECT_EQ(output_shapes.size(), 1); @@ -46,7 +46,7 @@ TEST_F(GroupConvolutionV1StaticShapeInferenceTest, default_ctor) { op->set_pads_end({2, 1}); op->set_auto_pad(op::PadType::EXPLICIT); - input_shapes = ShapeVector{{1, 6, 10, 12}, {3, 2, 2, 5, 5}}; + input_shapes = 
StaticShapeVector{{1, 6, 10, 12}, {3, 2, 2, 5, 5}}; auto shape_infer = make_shape_inference(op); const auto input_shape_refs = make_static_shape_refs(input_shapes); output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor()); @@ -66,7 +66,7 @@ TEST_F(GroupConvolutionV1StaticShapeInferenceTest, default_ctor_three_input_shap op->set_auto_pad(op::PadType::EXPLICIT); // Third input shape (bias) can be provided, but is not used - input_shapes = ShapeVector{{1, 6, 10, 12}, {3, 2, 2, 5, 5}, {3}}; + input_shapes = StaticShapeVector{{1, 6, 10, 12}, {3, 2, 2, 5, 5}, {3}}; auto shape_infer = make_shape_inference(op); const auto input_shape_refs = make_static_shape_refs(input_shapes); output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor()); @@ -89,7 +89,7 @@ TEST_F(GroupConvolutionV1StaticShapeInferenceTest, 1d_explicit_pads_inputs_stati op = make_op(data, filters, strides, pads_begin, pads_end, dilations, auto_pad); - input_shapes = ShapeVector{{1, 12, 20}, {12, 1, 1, 3}}; + input_shapes = StaticShapeVector{{1, 12, 20}, {12, 1, 1, 3}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -108,7 +108,7 @@ TEST_F(GroupConvolutionV1StaticShapeInferenceTest, 2d_auto_pads_same_lower_input op = make_op(data, filters, strides, pads_begin, pads_end, dilations, auto_pad); - input_shapes = ShapeVector{{1, 4, 5, 5}, {2, 1, 2, 3, 3}}; + input_shapes = StaticShapeVector{{1, 4, 5, 5}, {2, 1, 2, 3, 3}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -127,7 +127,7 @@ TEST_F(GroupConvolutionV1StaticShapeInferenceTest, 3d_auto_pad_same_lower_inputs op = make_op(data, filters, strides, pads_begin, pads_end, dilations, auto_pad); - input_shapes = ShapeVector{{3, 6, 5, 5, 5}, {1, 6, 6, 3, 3, 3}}; + input_shapes = StaticShapeVector{{3, 6, 5, 5, 5}, {1, 6, 6, 3, 3, 3}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -144,7 +144,7 @@ TEST_F(GroupConvolutionV1StaticShapeInferenceTest, dilations_not_defined_for_spa const auto data = std::make_shared(element::f32, PartialShape::dynamic()); const auto filters = std::make_shared(element::f32, PartialShape::dynamic()); - input_shapes = ShapeVector{{1, 4, 5, 5}, {2, 1, 2, 3, 3}}; + input_shapes = StaticShapeVector{{1, 4, 5, 5}, {2, 1, 2, 3, 3}}; OV_EXPECT_THROW(op = make_op(data, filters, strides, pads_begin, pads_end, dilations, auto_pad), NodeValidationFailure, HasSubstr("Dilations should be defined for all and only spatial dimensions")); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/gru_cell_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/gru_cell_shape_inference_test.cpp index b6310fafad128a..d59b16a0a50146 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/gru_cell_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/gru_cell_shape_inference_test.cpp @@ -9,12 +9,7 @@ using namespace ov; using namespace ov::intel_cpu; -class GRUCellV3StaticShapeInferenceTest : public OpStaticShapeInferenceTest { -protected: - void SetUp() override { - this->output_shapes = ShapeVector(1); - } -}; +class GRUCellV3StaticShapeInferenceTest : public OpStaticShapeInferenceTest {}; TEST_F(GRUCellV3StaticShapeInferenceTest, default_ctor) { constexpr size_t batch_size = 2; diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/gru_sequence_shape_inference_test.cpp 
b/src/plugins/intel_cpu/tests/unit/shape_inference_test/gru_sequence_shape_inference_test.cpp index 13aac38fb98d45..963e6dde83db30 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/gru_sequence_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/gru_sequence_shape_inference_test.cpp @@ -9,12 +9,7 @@ using namespace ov; using namespace ov::intel_cpu; -class GRUSequenceV5StaticShapeInferenceTest : public OpStaticShapeInferenceTest { -protected: - void SetUp() override { - this->output_shapes = ShapeVector(2); - } -}; +class GRUSequenceV5StaticShapeInferenceTest : public OpStaticShapeInferenceTest {}; TEST_F(GRUSequenceV5StaticShapeInferenceTest, default_ctor) { constexpr size_t batch_size = 2; diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/i420_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/i420_shape_inference_test.cpp index 913abf62e25c9a..4f41c255699643 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/i420_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/i420_shape_inference_test.cpp @@ -23,7 +23,7 @@ TYPED_TEST_SUITE_P(ConvertColorI420Test); TYPED_TEST_P(ConvertColorI420Test, default_ctor_single_plane_no_args) { this->op = this->make_op(); - this->input_shapes = ShapeVector{{3, 15, 10, 1}}; + this->input_shapes = StaticShapeVector{{3, 15, 10, 1}}; auto output_shapes = shape_inference(this->op.get(), this->input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -33,7 +33,7 @@ TYPED_TEST_P(ConvertColorI420Test, default_ctor_single_plane_no_args) { TYPED_TEST_P(ConvertColorI420Test, default_ctor_three_plane_no_args) { this->op = this->make_op(); - this->input_shapes = ShapeVector{{3, 20, 20, 1}, {3, 10, 10, 1}, {3, 10, 10, 1}}; + this->input_shapes = StaticShapeVector{{3, 20, 20, 1}, {3, 10, 10, 1}, {3, 10, 10, 1}}; auto output_shapes = shape_inference(this->op.get(), this->input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -44,7 +44,7 @@ TYPED_TEST_P(ConvertColorI420Test, single_plane_dynamic_rank) { const auto yuv = std::make_shared(element::f32, PartialShape::dynamic()); this->op = this->make_op(yuv); - this->input_shapes = ShapeVector{{3, 12, 10, 1}}; + this->input_shapes = StaticShapeVector{{3, 12, 10, 1}}; auto output_shapes = shape_inference(this->op.get(), this->input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -55,7 +55,7 @@ TYPED_TEST_P(ConvertColorI420Test, single_plane_static_rank) { const auto yuv = std::make_shared(element::f32, PartialShape::dynamic(4)); this->op = this->make_op(yuv); - this->input_shapes = ShapeVector{{5, 3, 2, 1}}; + this->input_shapes = StaticShapeVector{{5, 3, 2, 1}}; auto output_shapes = shape_inference(this->op.get(), this->input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -68,7 +68,7 @@ TYPED_TEST_P(ConvertColorI420Test, three_plane_dynamic_rank) { const auto v = std::make_shared(element::f32, PartialShape::dynamic()); this->op = this->make_op(y, u, v); - this->input_shapes = ShapeVector{{3, 10, 10, 1}, {3, 5, 5, 1}, {3, 5, 5, 1}}; + this->input_shapes = StaticShapeVector{{3, 10, 10, 1}, {3, 5, 5, 1}, {3, 5, 5, 1}}; auto output_shapes = shape_inference(this->op.get(), this->input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -81,7 +81,7 @@ TYPED_TEST_P(ConvertColorI420Test, three_plane_static_rank) { const auto v = std::make_shared(element::f32, PartialShape::dynamic(4)); this->op = this->make_op(y, u, v); - this->input_shapes = ShapeVector{{5, 20, 20, 1}, {5, 10, 10, 1}, {5, 10, 10, 
1}}; + this->input_shapes = StaticShapeVector{{5, 20, 20, 1}, {5, 10, 10, 1}, {5, 10, 10, 1}}; auto output_shapes = shape_inference(this->op.get(), this->input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -94,7 +94,7 @@ TYPED_TEST_P(ConvertColorI420Test, three_plane_u_shape_not_compatible) { const auto v = std::make_shared(element::f32, PartialShape::dynamic(4)); this->op = this->make_op(y, u, v); - this->input_shapes = ShapeVector{{5, 20, 20, 1}, {4, 10, 10, 1}, {5, 10, 10, 1}}; + this->input_shapes = StaticShapeVector{{5, 20, 20, 1}, {4, 10, 10, 1}, {5, 10, 10, 1}}; OV_EXPECT_THROW(shape_inference(this->op.get(), this->input_shapes), NodeValidationFailure, HasSubstr("Y shape is inconsistent with U and V")); @@ -104,7 +104,7 @@ TYPED_TEST_P(ConvertColorI420Test, single_plane_height_not_div_by_three) { const auto yuv = std::make_shared(element::f32, PartialShape::dynamic(4)); this->op = this->make_op(yuv); - this->input_shapes = ShapeVector{{5, 19, 20, 1}}; + this->input_shapes = StaticShapeVector{{5, 19, 20, 1}}; OV_EXPECT_THROW(shape_inference(this->op.get(), this->input_shapes), NodeValidationFailure, HasSubstr("Image height shall be divisible by 3")); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/interpolate_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/interpolate_shape_inference_test.cpp index dd9dadcbdd358d..cffd9da21c0b9d 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/interpolate_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/interpolate_shape_inference_test.cpp @@ -30,9 +30,9 @@ TEST_F(InterpolateV0StaticShapeInferenceTest, default_ctor_no_attributes) { op->set_attrs(attrs); int32_t out_shape_v[] = {10, 20, 30}; - const auto const_data = std::unordered_map{{1, {element::i32, Shape{3}, out_shape_v}}}; + const auto const_data = std::unordered_map{{1, {element::i32, ov::Shape{3}, out_shape_v}}}; - input_shapes = ShapeVector{{5, 2, 128, 128, 128, 64}, {3}}; + input_shapes = StaticShapeVector{{5, 2, 128, 128, 128, 64}, {3}}; const auto output_shapes = shape_inference(op.get(), input_shapes, const_data); EXPECT_EQ(output_shapes.size(), 1); @@ -43,10 +43,10 @@ TEST_F(InterpolateV0StaticShapeInferenceTest, out_shape_as_constant) { attrs.axes = AxisSet{1, 3}; const auto img = std::make_shared(element::f32, PartialShape::dynamic()); - const auto out_shape = op::v0::Constant::create(element::i64, Shape{2}, {100, 100}); + const auto out_shape = op::v0::Constant::create(element::i64, ov::Shape{2}, {100, 100}); op = make_op(img, out_shape, attrs); - input_shapes = ShapeVector{{5, 2, 128, 128, 128}, {2}}; + input_shapes = StaticShapeVector{{5, 2, 128, 128, 128}, {2}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -61,9 +61,9 @@ TEST_F(InterpolateV0StaticShapeInferenceTest, all_inputs_dynamic_rank_use_scales op = make_op(img, out_shape, attrs); int32_t out_shape_v[] = {10, 20, 30}; - const auto const_data = std::unordered_map{{1, {element::i32, Shape{3}, out_shape_v}}}; + const auto const_data = std::unordered_map{{1, {element::i32, ov::Shape{3}, out_shape_v}}}; - input_shapes = ShapeVector{{5, 2, 128, 128, 128, 64}, {3}}; + input_shapes = StaticShapeVector{{5, 2, 128, 128, 128, 64}, {3}}; const auto output_shapes = shape_inference(op.get(), input_shapes, const_data); EXPECT_EQ(output_shapes.size(), 1); @@ -78,9 +78,9 @@ TEST_F(InterpolateV0StaticShapeInferenceTest, all_inputs_static_rank_use_sizes) op = make_op(img, out_shape, 
attrs); int32_t out_shape_v[] = {10, 20, 30}; - const auto const_data = std::unordered_map{{1, {element::i32, Shape{3}, out_shape_v}}}; + const auto const_data = std::unordered_map{{1, {element::i32, ov::Shape{3}, out_shape_v}}}; - input_shapes = ShapeVector{{5, 2, 128, 128, 128, 64}, {3}}; + input_shapes = StaticShapeVector{{5, 2, 128, 128, 128, 64}, {3}}; const auto output_shapes = shape_inference(op.get(), input_shapes, const_data); EXPECT_EQ(output_shapes.size(), 1); @@ -108,10 +108,10 @@ TEST_F(InterpolateV4StaticShapeInferenceTest, default_ctor_no_attributes) { float scales_v[] = {1.5f, 3.0f, 0.2f}; int32_t axes_v[] = {2, 0, 5}; - const auto const_data = std::unordered_map{{2, {element::f32, Shape{3}, scales_v}}, - {3, {element::i32, Shape{3}, axes_v}}}; + const auto const_data = std::unordered_map{{2, {element::f32, ov::Shape{3}, scales_v}}, + {3, {element::i32, ov::Shape{3}, axes_v}}}; - input_shapes = ShapeVector{{5, 2, 128, 128, 128, 64}, {3}, {3}, {3}}; + input_shapes = StaticShapeVector{{5, 2, 128, 128, 128, 64}, {3}, {3}, {3}}; const auto output_shapes = shape_inference(op.get(), input_shapes, const_data); EXPECT_EQ(output_shapes.size(), 1); @@ -123,11 +123,11 @@ TEST_F(InterpolateV4StaticShapeInferenceTest, scales_as_constant) { const auto img = std::make_shared(element::f32, PartialShape::dynamic()); const auto sizes = std::make_shared(element::i32, PartialShape{1}); - const auto scales = op::v0::Constant::create(element::f32, Shape{2}, {2.0f, 0.7f}); - const auto axes = op::v0::Constant::create(element::i64, Shape{2}, {1, 3}); + const auto scales = op::v0::Constant::create(element::f32, ov::Shape{2}, {2.0f, 0.7f}); + const auto axes = op::v0::Constant::create(element::i64, ov::Shape{2}, {1, 3}); op = make_op(img, sizes, scales, axes, attrs); - input_shapes = ShapeVector{{5, 2, 128, 128, 128}, {1}, {2}, {2}}; + input_shapes = StaticShapeVector{{5, 2, 128, 128, 128}, {1}, {2}, {2}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -136,12 +136,12 @@ TEST_F(InterpolateV4StaticShapeInferenceTest, scales_as_constant) { TEST_F(InterpolateV4StaticShapeInferenceTest, sizes_as_constant) { const auto img = std::make_shared(element::f32, PartialShape::dynamic()); - const auto sizes = op::v0::Constant::create(element::i32, Shape{2}, {10, 5}); + const auto sizes = op::v0::Constant::create(element::i32, ov::Shape{2}, {10, 5}); const auto scales = std::make_shared(element::f32, PartialShape{1}); - const auto axes = op::v0::Constant::create(element::i64, Shape{2}, {3, 1}); + const auto axes = op::v0::Constant::create(element::i64, ov::Shape{2}, {3, 1}); op = make_op(img, sizes, scales, axes, attrs); - input_shapes = ShapeVector{{5, 2, 128, 128, 128}, {2}, {1}, {2}}; + input_shapes = StaticShapeVector{{5, 2, 128, 128, 128}, {2}, {1}, {2}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -160,10 +160,10 @@ TEST_F(InterpolateV4StaticShapeInferenceTest, all_inputs_dynamic_rank_use_scales float scales_v[] = {1.5f, 3.0f, 0.2f}; int32_t axes_v[] = {2, 0, 5}; - const auto const_data = std::unordered_map{{2, {element::f32, Shape{3}, scales_v}}, - {3, {element::i32, Shape{3}, axes_v}}}; + const auto const_data = std::unordered_map{{2, {element::f32, ov::Shape{3}, scales_v}}, + {3, {element::i32, ov::Shape{3}, axes_v}}}; - input_shapes = ShapeVector{{5, 2, 128, 128, 128, 64}, {3}, {3}, {3}}; + input_shapes = StaticShapeVector{{5, 2, 128, 128, 128, 64}, {3}, {3}, {3}}; const auto output_shapes = 
shape_inference(op.get(), input_shapes, const_data); EXPECT_EQ(output_shapes.size(), 1); @@ -181,10 +181,10 @@ TEST_F(InterpolateV4StaticShapeInferenceTest, all_inputs_static_rank_use_sizes) int32_t sizes_v[] = {10, 50, 60}; int32_t axes_v[] = {1, 0, 3}; - const auto const_data = std::unordered_map{{1, {element::i32, Shape{3}, sizes_v}}, - {3, {element::i32, Shape{3}, axes_v}}}; + const auto const_data = std::unordered_map{{1, {element::i32, ov::Shape{3}, sizes_v}}, + {3, {element::i32, ov::Shape{3}, axes_v}}}; - input_shapes = ShapeVector{{5, 2, 128, 128, 128, 64}, {3}, {3}, {3}}; + input_shapes = StaticShapeVector{{5, 2, 128, 128, 128, 64}, {3}, {3}, {3}}; const auto output_shapes = shape_inference(op.get(), input_shapes, const_data); EXPECT_EQ(output_shapes.size(), 1); @@ -212,10 +212,10 @@ TEST_F(InterpolateV11StaticShapeInferenceTest, default_ctor_no_attributes) { float scales_v[] = {1.5f, 3.0f, 0.2f}; int32_t axes_v[] = {2, 0, 5}; - const auto const_data = std::unordered_map{{1, {element::f32, Shape{3}, scales_v}}, - {2, {element::i32, Shape{3}, axes_v}}}; + const auto const_data = std::unordered_map{{1, {element::f32, ov::Shape{3}, scales_v}}, + {2, {element::i32, ov::Shape{3}, axes_v}}}; - input_shapes = ShapeVector{{5, 2, 128, 128, 128, 64}, {3}, {3}}; + input_shapes = StaticShapeVector{{5, 2, 128, 128, 128, 64}, {3}, {3}}; const auto output_shapes = shape_inference(op.get(), input_shapes, const_data); EXPECT_EQ(output_shapes.size(), 1); @@ -226,11 +226,11 @@ TEST_F(InterpolateV11StaticShapeInferenceTest, scales_as_constant) { attrs.shape_calculation_mode = ShapeCalcMode::SCALES; const auto img = std::make_shared(element::f32, PartialShape::dynamic()); - const auto scales = op::v0::Constant::create(element::f32, Shape{2}, {2.0f, 0.7f}); - const auto axes = op::v0::Constant::create(element::i64, Shape{2}, {1, 3}); + const auto scales = op::v0::Constant::create(element::f32, ov::Shape{2}, {2.0f, 0.7f}); + const auto axes = op::v0::Constant::create(element::i64, ov::Shape{2}, {1, 3}); op = make_op(img, scales, axes, attrs); - input_shapes = ShapeVector{{5, 2, 128, 128, 128}, {2}, {2}}; + input_shapes = StaticShapeVector{{5, 2, 128, 128, 128}, {2}, {2}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -239,11 +239,11 @@ TEST_F(InterpolateV11StaticShapeInferenceTest, scales_as_constant) { TEST_F(InterpolateV11StaticShapeInferenceTest, sizes_as_constant) { const auto img = std::make_shared(element::f32, PartialShape::dynamic()); - const auto sizes = op::v0::Constant::create(element::i32, Shape{2}, {10, 5}); - const auto axes = op::v0::Constant::create(element::i64, Shape{2}, {3, 1}); + const auto sizes = op::v0::Constant::create(element::i32, ov::Shape{2}, {10, 5}); + const auto axes = op::v0::Constant::create(element::i64, ov::Shape{2}, {3, 1}); op = make_op(img, sizes, axes, attrs); - input_shapes = ShapeVector{{5, 2, 128, 128, 128}, {2}, {2}}; + input_shapes = StaticShapeVector{{5, 2, 128, 128, 128}, {2}, {2}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -260,10 +260,10 @@ TEST_F(InterpolateV11StaticShapeInferenceTest, all_inputs_dynamic_rank_use_scale float scales_v[] = {1.5f, 3.0f, 0.2f}; int32_t axes_v[] = {2, 0, 5}; - const auto const_data = std::unordered_map{{1, {element::f32, Shape{3}, scales_v}}, - {2, {element::i32, Shape{3}, axes_v}}}; + const auto const_data = std::unordered_map{{1, {element::f32, ov::Shape{3}, scales_v}}, + {2, {element::i32, ov::Shape{3}, axes_v}}}; - 
input_shapes = ShapeVector{{5, 2, 128, 128, 128, 64}, {3}, {3}}; + input_shapes = StaticShapeVector{{5, 2, 128, 128, 128, 64}, {3}, {3}}; const auto output_shapes = shape_inference(op.get(), input_shapes, const_data); EXPECT_EQ(output_shapes.size(), 1); @@ -280,10 +280,10 @@ TEST_F(InterpolateV11StaticShapeInferenceTest, all_inputs_static_rank_use_sizes) int32_t sizes_v[] = {10, 50, 60}; int32_t axes_v[] = {1, 0, 3}; - const auto const_data = std::unordered_map{{1, {element::i32, Shape{3}, sizes_v}}, - {2, {element::i32, Shape{3}, axes_v}}}; + const auto const_data = std::unordered_map{{1, {element::i32, ov::Shape{3}, sizes_v}}, + {2, {element::i32, ov::Shape{3}, axes_v}}}; - input_shapes = ShapeVector{{5, 2, 128, 128, 128, 64}, {3}, {3}}; + input_shapes = StaticShapeVector{{5, 2, 128, 128, 128, 64}, {3}, {3}}; const auto output_shapes = shape_inference(op.get(), input_shapes, const_data); EXPECT_EQ(output_shapes.size(), 1); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/inverse_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/inverse_shape_inference_test.cpp index 2296d206c507df..5ab768c847cc7c 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/inverse_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/inverse_shape_inference_test.cpp @@ -18,7 +18,7 @@ TEST_F(Inversev14StaticShapeInferenceTest, inverse_default_ctor) { op = make_op(); op->set_adjoint(false); - input_shapes = ShapeVector{{2, 2}}; + input_shapes = StaticShapeVector{{2, 2}}; auto output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -29,7 +29,7 @@ TEST_F(Inversev14StaticShapeInferenceTest, inverse_4_4_small_matrix) { auto data = std::make_shared(element::f32, PartialShape::dynamic(2)); auto inverse = std::make_shared(data, false); - input_shapes = ShapeVector{{4, 4}}; + input_shapes = StaticShapeVector{{4, 4}}; auto output_shapes = shape_inference(inverse.get(), input_shapes); ASSERT_EQ(output_shapes[0], StaticShape({4, 4})); } @@ -38,7 +38,7 @@ TEST_F(Inversev14StaticShapeInferenceTest, inverse_10_10_big_matrix) { auto data = std::make_shared(element::f32, PartialShape::dynamic(2)); auto inverse = std::make_shared(data, false); - input_shapes = ShapeVector{{10, 10}}; + input_shapes = StaticShapeVector{{10, 10}}; auto output_shapes = shape_inference(inverse.get(), input_shapes); ASSERT_EQ(output_shapes[0], StaticShape({10, 10})); } @@ -47,7 +47,7 @@ TEST_F(Inversev14StaticShapeInferenceTest, inverse_10_1_1_keep_batch_when_single auto data = std::make_shared(element::f32, PartialShape::dynamic(3)); auto inverse = std::make_shared(data, false); - input_shapes = ShapeVector{{10, 1, 1}}; + input_shapes = StaticShapeVector{{10, 1, 1}}; auto output_shapes = shape_inference(inverse.get(), input_shapes); ASSERT_EQ(output_shapes[0], StaticShape({10, 1, 1})); } @@ -56,7 +56,7 @@ TEST_F(Inversev14StaticShapeInferenceTest, inverse_10_9_9_keep_batch_big_matrix) auto data = std::make_shared(element::f32, PartialShape::dynamic(3)); auto inverse = std::make_shared(data, false); - input_shapes = ShapeVector{{10, 9, 9}}; + input_shapes = StaticShapeVector{{10, 9, 9}}; auto output_shapes = shape_inference(inverse.get(), input_shapes); ASSERT_EQ(output_shapes[0], StaticShape({10, 9, 9})); } @@ -65,7 +65,7 @@ TEST_F(Inversev14StaticShapeInferenceTest, inverse_10_5_3_2_2_complex_multi_dim_ auto data = std::make_shared(element::f32, PartialShape::dynamic(3)); auto inverse = std::make_shared(data, false); - 
input_shapes = ShapeVector{{10, 5, 3, 2, 2}}; + input_shapes = StaticShapeVector{{10, 5, 3, 2, 2}}; auto output_shapes = shape_inference(inverse.get(), input_shapes); ASSERT_EQ(output_shapes[0], StaticShape({10, 5, 3, 2, 2})); } diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/logical_not_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/logical_not_shape_inference_test.cpp index 2e7945731b7a54..f05feda1b0ea95 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/logical_not_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/logical_not_shape_inference_test.cpp @@ -13,12 +13,7 @@ using namespace ov; using namespace ov::intel_cpu; using namespace testing; -class LogicalNotStaticShapeInferenceTest : public OpStaticShapeInferenceTest { -protected: - void SetUp() override { - this->output_shapes = ShapeVector(1); - } -}; +class LogicalNotStaticShapeInferenceTest : public OpStaticShapeInferenceTest {}; TEST_F(LogicalNotStaticShapeInferenceTest, static_rank) { const auto a = std::make_shared(element::boolean, PartialShape{-1, -1, -1, -1}); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/lstm_cell_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/lstm_cell_shape_inference_test.cpp index f2dc8a2328c28e..c50aa9ea6be599 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/lstm_cell_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/lstm_cell_shape_inference_test.cpp @@ -9,12 +9,7 @@ using namespace ov; using namespace ov::intel_cpu; -class LSTMCellV4StaticShapeInferenceTest : public OpStaticShapeInferenceTest { -protected: - void SetUp() override { - this->output_shapes = ShapeVector(2); - } -}; +class LSTMCellV4StaticShapeInferenceTest : public OpStaticShapeInferenceTest {}; TEST_F(LSTMCellV4StaticShapeInferenceTest, default_ctor) { const size_t batch_size = 2; @@ -72,7 +67,7 @@ TEST(StaticShapeInferenceTest, LSTMCellV0Test) { const auto H_t = std::make_shared(element::f32, PartialShape{-1, -1}); const auto C_t = std::make_shared(element::f32, PartialShape{-1, -1}); const auto Bias = std::make_shared(element::f32, PartialShape{-1}); - const auto Peelhole = op::v0::Constant::create(element::f32, Shape{3 * hidden_size}, std::vector{0.f}); + const auto Peelhole = op::v0::Constant::create(element::f32, ov::Shape{3 * hidden_size}, std::vector{0.f}); const auto lstm_cell = std::make_shared(X, H_t, C_t, W, R, Bias, Peelhole, hidden_size); std::vector static_input_shapes = {StaticShape{batch_size, input_size}, diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/lstm_seq_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/lstm_seq_shape_inference_test.cpp index a8b4160f405fd1..002bb88de88f54 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/lstm_seq_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/lstm_seq_shape_inference_test.cpp @@ -9,12 +9,7 @@ using namespace ov; using namespace ov::intel_cpu; -class LSTMSequenceV5StaticShapeInferenceTest : public OpStaticShapeInferenceTest { -protected: - void SetUp() override { - this->output_shapes = ShapeVector(3); - } -}; +class LSTMSequenceV5StaticShapeInferenceTest : public OpStaticShapeInferenceTest {}; TEST_F(LSTMSequenceV5StaticShapeInferenceTest, default_ctor) { constexpr size_t batch_size = 2; diff --git 
a/src/plugins/intel_cpu/tests/unit/shape_inference_test/matrix_nms_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/matrix_nms_shape_inference_test.cpp index e6739c3ddec0f2..e4c4873c1f84a8 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/matrix_nms_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/matrix_nms_shape_inference_test.cpp @@ -22,10 +22,10 @@ TEST_F(StaticShapeInferenceMatrixNmsV8Test, default_ctor_no_args) { op = make_op(); op->set_attrs(attrs); - input_shapes = ShapeVector{{5, 2, 4}, {5, 3, 2}}; + input_shapes = StaticShapeVector{{5, 2, 4}, {5, 3, 2}}; output_shapes = shape_inference(op.get(), input_shapes); - EXPECT_EQ(output_shapes, ShapeVector({{20, 6}, {20, 1}, {5}})); + EXPECT_EQ(output_shapes, StaticShapeVector({{20, 6}, {20, 1}, {5}})); } TEST_F(StaticShapeInferenceMatrixNmsV8Test, inputs_static_rank) { @@ -34,10 +34,10 @@ TEST_F(StaticShapeInferenceMatrixNmsV8Test, inputs_static_rank) { op = make_op(boxes, scores, attrs); - input_shapes = ShapeVector{{3, 2, 4}, {3, 3, 2}}; + input_shapes = StaticShapeVector{{3, 2, 4}, {3, 3, 2}}; output_shapes = shape_inference(op.get(), input_shapes); - EXPECT_EQ(output_shapes, ShapeVector({{18, 6}, {18, 1}, {3}})); + EXPECT_EQ(output_shapes, StaticShapeVector({{18, 6}, {18, 1}, {3}})); } TEST_F(StaticShapeInferenceMatrixNmsV8Test, all_inputs_are_dynamic) { @@ -46,8 +46,8 @@ TEST_F(StaticShapeInferenceMatrixNmsV8Test, all_inputs_are_dynamic) { op = make_op(boxes, scores, attrs); - input_shapes = ShapeVector{{5, 2, 4}, {5, 3, 2}}; + input_shapes = StaticShapeVector{{5, 2, 4}, {5, 3, 2}}; output_shapes = shape_inference(op.get(), input_shapes); - EXPECT_EQ(output_shapes, ShapeVector({{30, 6}, {30, 1}, {5}})); + EXPECT_EQ(output_shapes, StaticShapeVector({{30, 6}, {30, 1}, {5}})); } diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/max_pool_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/max_pool_shape_inference_test.cpp index 97beda20917414..ffce8197b28a68 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/max_pool_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/max_pool_shape_inference_test.cpp @@ -28,7 +28,7 @@ TEST_F(MaxPoolV1StaticShapeInferenceTest, default_ctor) { op->set_rounding_type(op::RoundingType::FLOOR); op->set_auto_pad(op::PadType::VALID); - input_shapes = ShapeVector{{1, 3, 10, 12}}; + input_shapes = StaticShapeVector{{1, 3, 10, 12}}; auto shape_infer = make_shape_inference(op); const auto input_shape_refs = make_static_shape_refs(input_shapes); output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor()); @@ -51,7 +51,7 @@ TEST_F(MaxPoolV1StaticShapeInferenceTest, no_auto_pad_round_floor) { op = make_op(data, strides, pads_begin, pads_end, kernel_shape, rounding_mode, pad_type); - input_shapes = ShapeVector{{1, 3, 10, 12}}; + input_shapes = StaticShapeVector{{1, 3, 10, 12}}; auto shape_infer = make_shape_inference(op); const auto input_shape_refs = make_static_shape_refs(input_shapes); output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor()); @@ -74,7 +74,7 @@ TEST_F(MaxPoolV1StaticShapeInferenceTest, auto_padding_same_lower_round_ceil) { op = make_op(data, strides, pads_begin, pads_end, kernel_shape, rounding_mode, pad_type); - input_shapes = ShapeVector{{1, 3, 10, 12, 20}}; + input_shapes = StaticShapeVector{{1, 3, 10, 12, 20}}; auto shape_infer = make_shape_inference(op); const auto 
input_shape_refs = make_static_shape_refs(input_shapes); output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor()); @@ -96,13 +96,13 @@ TEST_F(MaxPoolV14StaticShapeInferenceTest, ceil_torch_mode_1) { const auto data = std::make_shared(element::f64, PartialShape::dynamic()); const Strides strides{2, 2}; const Strides dilations{1, 1}; - const Shape pads_begin{1, 1}; - const Shape pads_end{1, 1}; - const Shape kernel_shape{2, 2}; + const ov::Shape pads_begin{1, 1}; + const ov::Shape pads_end{1, 1}; + const ov::Shape kernel_shape{2, 2}; const auto rounding_mode = op::RoundingType::CEIL_TORCH; op = make_op(data, strides, dilations, pads_begin, pads_end, kernel_shape, rounding_mode); - this->input_shapes = ShapeVector{{1, 3, 5, 5}}; + this->input_shapes = StaticShapeVector{{1, 3, 5, 5}}; auto shape_infer = make_shape_inference(this->op); const auto input_shape_refs = make_static_shape_refs(this->input_shapes); this->output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor()); @@ -114,13 +114,13 @@ TEST_F(MaxPoolV14StaticShapeInferenceTest, ceil_torch_mode_2) { const auto data = std::make_shared(element::f64, PartialShape::dynamic()); const Strides strides{2, 2}; const Strides dilations{1, 1}; - const Shape pads_begin{1, 1}; - const Shape pads_end{1, 1}; - const Shape kernel_shape{2, 2}; + const ov::Shape pads_begin{1, 1}; + const ov::Shape pads_end{1, 1}; + const ov::Shape kernel_shape{2, 2}; const auto rounding_mode = op::RoundingType::CEIL_TORCH; op = make_op(data, strides, dilations, pads_begin, pads_end, kernel_shape, rounding_mode); - this->input_shapes = ShapeVector{{1, 3, 9, 9}}; + this->input_shapes = StaticShapeVector{{1, 3, 9, 9}}; auto shape_infer = make_shape_inference(this->op); const auto input_shape_refs = make_static_shape_refs(this->input_shapes); this->output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor()); @@ -143,7 +143,7 @@ TYPED_TEST_P(MaxPoolCommonStaticShapeInferenceTest, default_ctor) { this->op->set_rounding_type(op::RoundingType::FLOOR); this->op->set_auto_pad(op::PadType::VALID); - this->input_shapes = ShapeVector{{1, 3, 10, 12}}; + this->input_shapes = StaticShapeVector{{1, 3, 10, 12}}; auto shape_infer = make_shape_inference(this->op); const auto input_shape_refs = make_static_shape_refs(this->input_shapes); this->output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor()); @@ -165,7 +165,7 @@ TYPED_TEST_P(MaxPoolCommonStaticShapeInferenceTest, no_dilation) { this->op = this->make_op(data, strides, dilations, pads_begin, pads_end, kernel_shape); - this->input_shapes = ShapeVector{{2, 3, 13, 13}}; + this->input_shapes = StaticShapeVector{{2, 3, 13, 13}}; auto shape_infer = make_shape_inference(this->op); const auto input_shape_refs = make_static_shape_refs(this->input_shapes); this->output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor()); @@ -187,7 +187,7 @@ TYPED_TEST_P(MaxPoolCommonStaticShapeInferenceTest, with_dilations) { this->op = this->make_op(data, strides, dilations, pads_begin, pads_end, kernel_shape); - this->input_shapes = ShapeVector{{2, 4, 13, 13}}; + this->input_shapes = StaticShapeVector{{2, 4, 13, 13}}; auto shape_infer = make_shape_inference(this->op); const auto input_shape_refs = make_static_shape_refs(this->input_shapes); this->output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor()); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/multinomial_shape_inference_test.cpp 
b/src/plugins/intel_cpu/tests/unit/shape_inference_test/multinomial_shape_inference_test.cpp index 80f6cf361d536a..7f0d8d169b0474 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/multinomial_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/multinomial_shape_inference_test.cpp @@ -12,14 +12,14 @@ using namespace ov; using namespace ov::intel_cpu; TEST(StaticShapeInferenceTest, MultinomialStaticShapeInferenceTest2D) { - auto probs = std::make_shared(element::f32, Shape{4, 4}); - auto num_samples = std::make_shared(element::i32, Shape{1}); + auto probs = std::make_shared(element::f32, ov::Shape{4, 4}); + auto num_samples = std::make_shared(element::i32, ov::Shape{1}); auto multinomial = std::make_shared(probs, num_samples, element::i32, false, false, 0, 0); // Test Static Shape 2D input std::vector static_input_shapes = {StaticShape{4, 4}, StaticShape{1}}; int32_t num_elements_val = 2; - auto const_data = std::unordered_map{{1, {element::i32, Shape{1}, &num_elements_val}}}; + auto const_data = std::unordered_map{{1, {element::i32, ov::Shape{1}, &num_elements_val}}}; auto acc = make_tensor_accessor(const_data); auto static_output_shapes = shape_infer(multinomial.get(), static_input_shapes, acc); ASSERT_EQ(static_output_shapes[0], StaticShape({4, 2})); @@ -33,7 +33,7 @@ TEST(StaticShapeInferenceTest, MultinomialDynamicShapeInferenceTestAllDimKnown2D // Test Partial Shape 2D input std::vector partial_input_shapes = {PartialShape{2, 3}, PartialShape{1}}; int32_t num_elements_val = 2; - auto const_data = std::unordered_map{{1, {element::i32, Shape{1}, &num_elements_val}}}; + auto const_data = std::unordered_map{{1, {element::i32, ov::Shape{1}, &num_elements_val}}}; auto acc = make_tensor_accessor(const_data); auto partial_output_shapes = shape_infer(multinomial.get(), partial_input_shapes, acc); ASSERT_EQ(partial_output_shapes[0], PartialShape({2, 2})); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/nms_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/nms_shape_inference_test.cpp index 77151688075855..10813535ea815f 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/nms_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/nms_shape_inference_test.cpp @@ -21,7 +21,7 @@ TYPED_TEST_P(NMSNonDynamicOutputTest, default_ctor_no_args) { int16_t max_output_boxes = 3; const auto const_data = std::unordered_map{{2, {element::i16, ov::Shape{}, &max_output_boxes}}}; - this->input_shapes = ShapeVector{{1, 6, 4}, {1, 1, 6}, {}, {}, {}}; + this->input_shapes = StaticShapeVector{{1, 6, 4}, {1, 1, 6}, {}, {}, {}}; const auto output_shapes = shape_inference(op.get(), this->input_shapes, const_data); EXPECT_EQ(output_shapes.size(), 1); @@ -32,12 +32,12 @@ TYPED_TEST_P(NMSNonDynamicOutputTest, boxes_scores_dynamic_rank_max_out_as_const const auto boxes = std::make_shared(element::f32, PartialShape::dynamic()); const auto scores = std::make_shared(element::f32, PartialShape::dynamic()); const auto max_output_boxes_per_class = op::v0::Constant::create(element::i16, ov::Shape{}, {3}); - const auto iou_threshold = std::make_shared(element::f32, Shape{}); - const auto score_threshold = std::make_shared(element::f32, Shape{}); + const auto iou_threshold = std::make_shared(element::f32, ov::Shape{}); + const auto score_threshold = std::make_shared(element::f32, ov::Shape{}); const auto op = this->make_op(boxes, scores, max_output_boxes_per_class, iou_threshold, 
score_threshold); - this->input_shapes = ShapeVector{{1, 6, 4}, {1, 1, 6}, {}, {}, {}}; + this->input_shapes = StaticShapeVector{{1, 6, 4}, {1, 1, 6}, {}, {}, {}}; const auto output_shapes = shape_inference(op.get(), this->input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -56,7 +56,7 @@ TYPED_TEST_P(NMSNonDynamicOutputTest, all_inputs_are_dynamic) { const auto op = this->make_op(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold); - this->input_shapes = ShapeVector{{1, 6, 4}, {1, 1, 6}, {}, {}, {}}; + this->input_shapes = StaticShapeVector{{1, 6, 4}, {1, 1, 6}, {}, {}, {}}; const auto output_shapes = shape_inference(op.get(), this->input_shapes, const_data); EXPECT_EQ(output_shapes.size(), 1); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/nv12_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/nv12_shape_inference_test.cpp index 1cbb30f16ebf08..883098697f05a9 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/nv12_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/nv12_shape_inference_test.cpp @@ -21,7 +21,7 @@ TYPED_TEST_SUITE_P(ConvertColorNV12Test); TYPED_TEST_P(ConvertColorNV12Test, default_ctor_single_plane_no_args) { this->op = this->make_op(); - this->input_shapes = ShapeVector{{3, 30, 10, 1}}; + this->input_shapes = StaticShapeVector{{3, 30, 10, 1}}; auto output_shapes = shape_inference(this->op.get(), this->input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -31,7 +31,7 @@ TYPED_TEST_P(ConvertColorNV12Test, default_ctor_single_plane_no_args) { TYPED_TEST_P(ConvertColorNV12Test, default_ctor_two_plane_no_args) { this->op = this->make_op(); - this->input_shapes = ShapeVector{{3, 20, 20, 1}, {3, 10, 10, 2}}; + this->input_shapes = StaticShapeVector{{3, 20, 20, 1}, {3, 10, 10, 2}}; auto output_shapes = shape_inference(this->op.get(), this->input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -42,7 +42,7 @@ TYPED_TEST_P(ConvertColorNV12Test, single_plane_dynamic_rank) { const auto yuv = std::make_shared(element::f32, PartialShape::dynamic()); this->op = this->make_op(yuv); - this->input_shapes = ShapeVector{{3, 12, 10, 1}}; + this->input_shapes = StaticShapeVector{{3, 12, 10, 1}}; auto output_shapes = shape_inference(this->op.get(), this->input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -53,7 +53,7 @@ TYPED_TEST_P(ConvertColorNV12Test, single_plane_static_rank) { const auto yuv = std::make_shared(element::f32, PartialShape::dynamic(4)); this->op = this->make_op(yuv); - this->input_shapes = ShapeVector{{5, 3, 2, 1}}; + this->input_shapes = StaticShapeVector{{5, 3, 2, 1}}; auto output_shapes = shape_inference(this->op.get(), this->input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -65,7 +65,7 @@ TYPED_TEST_P(ConvertColorNV12Test, two_plane_dynamic_rank) { const auto uv = std::make_shared(element::f32, PartialShape::dynamic()); this->op = this->make_op(y, uv); - this->input_shapes = ShapeVector{{3, 10, 10, 1}, {3, 5, 5, 2}}; + this->input_shapes = StaticShapeVector{{3, 10, 10, 1}, {3, 5, 5, 2}}; auto output_shapes = shape_inference(this->op.get(), this->input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -77,7 +77,7 @@ TYPED_TEST_P(ConvertColorNV12Test, two_plane_static_rank) { const auto uv = std::make_shared(element::f32, PartialShape::dynamic(4)); this->op = this->make_op(y, uv); - this->input_shapes = ShapeVector{{5, 20, 20, 1}, {5, 10, 10, 2}}; + this->input_shapes = StaticShapeVector{{5, 20, 20, 1}, {5, 10, 10, 2}}; auto output_shapes = 
shape_inference(this->op.get(), this->input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -89,7 +89,7 @@ TYPED_TEST_P(ConvertColorNV12Test, two_plane_uv_shape_not_compatible) { const auto uv = std::make_shared(element::f32, PartialShape::dynamic(4)); this->op = this->make_op(y, uv); - this->input_shapes = ShapeVector{{5, 20, 20, 1}, {4, 10, 10, 2}}; + this->input_shapes = StaticShapeVector{{5, 20, 20, 1}, {4, 10, 10, 2}}; OV_EXPECT_THROW(shape_inference(this->op.get(), this->input_shapes), NodeValidationFailure, HasSubstr("Y shape is inconsistent with UV")); @@ -100,7 +100,7 @@ TYPED_TEST_P(ConvertColorNV12Test, two_plane_y_dims_not_div_by_2) { const auto uv = std::make_shared(element::f32, PartialShape::dynamic(4)); this->op = this->make_op(y, uv); - this->input_shapes = ShapeVector{{5, 19, 19, 1}, {4, 10, 10, 2}}; + this->input_shapes = StaticShapeVector{{5, 19, 19, 1}, {4, 10, 10, 2}}; OV_EXPECT_THROW(shape_inference(this->op.get(), this->input_shapes), NodeValidationFailure, HasSubstr("Y shape is inconsistent with UV")); @@ -110,7 +110,7 @@ TYPED_TEST_P(ConvertColorNV12Test, single_plane_height_not_div_by_three) { const auto yuv = std::make_shared(element::f32, PartialShape::dynamic(4)); this->op = this->make_op(yuv); - this->input_shapes = ShapeVector{{5, 19, 20, 1}}; + this->input_shapes = StaticShapeVector{{5, 19, 20, 1}}; OV_EXPECT_THROW(shape_inference(this->op.get(), this->input_shapes), NodeValidationFailure, HasSubstr("Image height shall be divisible by 3")); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/one_hot_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/one_hot_shape_inference_test.cpp index 544ed41ba7b7cf..3486fc3e2cffb3 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/one_hot_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/one_hot_shape_inference_test.cpp @@ -14,9 +14,9 @@ using namespace testing; TEST(StaticShapeInferenceTest, OneHotTestConstantInput) { auto indices = std::make_shared(element::i64, PartialShape{-1}); - auto depth = op::v0::Constant::create(element::i64, Shape{}, {2}); - auto on_value = op::v0::Constant::create(element::u32, Shape{}, {5}); - auto off_value = op::v0::Constant::create(element::u32, Shape{}, {10}); + auto depth = op::v0::Constant::create(element::i64, ov::Shape{}, {2}); + auto on_value = op::v0::Constant::create(element::u32, ov::Shape{}, {5}); + auto off_value = op::v0::Constant::create(element::u32, ov::Shape{}, {10}); int64_t axis = -1; auto ont_hot = std::make_shared(indices, depth, on_value, off_value, axis); // Test StaticShape @@ -27,9 +27,9 @@ TEST(StaticShapeInferenceTest, OneHotTestConstantInput) { TEST(StaticShapeInferenceTest, OneHotTestConstantMap) { auto indices = std::make_shared(element::i64, PartialShape{-1}); - auto depth = std::make_shared(element::i64, Shape{}); - auto on_param = std::make_shared(element::i32, Shape{}); - auto off_param = std::make_shared(element::i32, Shape{}); + auto depth = std::make_shared(element::i64, ov::Shape{}); + auto on_param = std::make_shared(element::i32, ov::Shape{}); + auto off_param = std::make_shared(element::i32, ov::Shape{}); int64_t axis = -1; auto ont_hot = std::make_shared(indices, depth, on_param, off_param, axis); @@ -67,9 +67,9 @@ TEST(StaticShapeInferenceTest, OneHotTestConstantMapDefaultCtor) { TEST(StaticShapeInferenceTest, OneHotTestConstantMapNegativeDepth) { auto indices = std::make_shared(element::i64, PartialShape{-1}); - auto depth = 
std::make_shared(element::i64, Shape{}); - auto on_param = std::make_shared(element::i32, Shape{}); - auto off_param = std::make_shared(element::i32, Shape{}); + auto depth = std::make_shared(element::i64, ov::Shape{}); + auto on_param = std::make_shared(element::i32, ov::Shape{}); + auto off_param = std::make_shared(element::i32, ov::Shape{}); int64_t axis = -1; auto ont_hot = std::make_shared(indices, depth, on_param, off_param, axis); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/pad_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/pad_shape_inference_test.cpp index dde984401adfa9..27a1eee9812d39 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/pad_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/pad_shape_inference_test.cpp @@ -30,10 +30,10 @@ TYPED_TEST_P(PadStaticShapeInference, default_ctor) { int64_t pads_begin[] = {3, 2, 1, 1}; int32_t pads_end[] = {0, 1, 2, 3}; - const auto const_data = std::unordered_map{{1, {element::i64, Shape{4}, pads_begin}}, - {2, {element::i32, Shape{4}, pads_end}}}; + const auto const_data = std::unordered_map{{1, {element::i64, ov::Shape{4}, pads_begin}}, + {2, {element::i32, ov::Shape{4}, pads_end}}}; - this->input_shapes = ShapeVector{{3, 6, 5, 5}, {4}, {4}}; + this->input_shapes = StaticShapeVector{{3, 6, 5, 5}, {4}, {4}}; this->output_shapes = shape_inference(op.get(), this->input_shapes, const_data); EXPECT_EQ(this->output_shapes.size(), 1); @@ -43,13 +43,13 @@ TYPED_TEST_P(PadStaticShapeInference, default_ctor) { TYPED_TEST_P(PadStaticShapeInference, pads_begin_end_value_as_constants) { const auto data = std::make_shared(element::f32, PartialShape::dynamic()); - const auto pads_begin = Constant::create(element::i64, Shape{4}, {3, 2, 1, 0}); - const auto pads_end = Constant::create(element::i64, Shape{4}, {0, 1, 2, 3}); - const auto pad_val = Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = Constant::create(element::i64, ov::Shape{4}, {3, 2, 1, 0}); + const auto pads_end = Constant::create(element::i64, ov::Shape{4}, {0, 1, 2, 3}); + const auto pad_val = Constant::create(element::f32, ov::Shape{}, {2112}); const auto op = this->make_op(data, pads_begin, pads_end, pad_val, op::PadMode::CONSTANT); - this->input_shapes = ShapeVector{{3, 6, 5, 5}, {4}, {4}, {}}; + this->input_shapes = StaticShapeVector{{3, 6, 5, 5}, {4}, {4}, {}}; this->output_shapes = shape_inference(op.get(), this->input_shapes); EXPECT_EQ(this->output_shapes.size(), 1); @@ -64,12 +64,12 @@ TYPED_TEST_P(PadStaticShapeInference, pads_begin_end_in_constant_map) { uint64_t pads_begin_data[] = {0, 2, 2, 0}; uint32_t pads_end_data[] = {0, 1, 2, 0}; - const auto const_data = std::unordered_map{{1, {element::u64, Shape{4}, pads_begin_data}}, - {2, {element::u32, Shape{4}, pads_end_data}}}; + const auto const_data = std::unordered_map{{1, {element::u64, ov::Shape{4}, pads_begin_data}}, + {2, {element::u32, ov::Shape{4}, pads_end_data}}}; const auto op = this->make_op(data, pads_begin, pads_end, op::PadMode::REFLECT); - this->input_shapes = ShapeVector{{3, 6, 5, 1}, {4}, {4}}; + this->input_shapes = StaticShapeVector{{3, 6, 5, 1}, {4}, {4}}; this->output_shapes = shape_inference(op.get(), this->input_shapes, const_data); EXPECT_EQ(this->output_shapes.front(), StaticShape({3, 9, 9, 1})); @@ -78,14 +78,14 @@ TYPED_TEST_P(PadStaticShapeInference, pads_begin_end_in_constant_map) { TYPED_TEST_P(PadStaticShapeInference, pads_begin_got_negative_value) { const auto data 
= std::make_shared(element::f32, PartialShape::dynamic()); const auto pads_begin = std::make_shared(element::i8, PartialShape::dynamic()); - const auto pads_end = Constant::create(element::i64, Shape{4}, {0, 0, 0, 0}); + const auto pads_end = Constant::create(element::i64, ov::Shape{4}, {0, 0, 0, 0}); int8_t pads_begin_data[] = {0, -2, -2, 0}; - const auto const_data = std::unordered_map{{1, {element::i8, Shape{4}, pads_begin_data}}}; + const auto const_data = std::unordered_map{{1, {element::i8, ov::Shape{4}, pads_begin_data}}}; const auto op = this->make_op(data, pads_begin, pads_end, op::PadMode::REFLECT); - this->input_shapes = ShapeVector{{3, SIZE_MAX, 5, 2}, {4}, {4}}; + this->input_shapes = StaticShapeVector{{3, SIZE_MAX, 5, 2}, {4}, {4}}; this->output_shapes = shape_inference(op.get(), this->input_shapes, const_data); @@ -94,15 +94,15 @@ TYPED_TEST_P(PadStaticShapeInference, pads_begin_got_negative_value) { TYPED_TEST_P(PadStaticShapeInference, pads_end_got_negative_value) { const auto data = std::make_shared(element::f32, PartialShape::dynamic()); - const auto pads_begin = Constant::create(element::i64, Shape{4}, {1, 1, 2, 1}); + const auto pads_begin = Constant::create(element::i64, ov::Shape{4}, {1, 1, 2, 1}); const auto pads_end = std::make_shared(element::i8, PartialShape::dynamic()); const auto op = this->make_op(data, pads_begin, pads_end, op::PadMode::REFLECT); int8_t pads_end_data[] = {0, -3, -2, 0}; - const auto const_data = std::unordered_map{{2, {element::i8, Shape{4}, pads_end_data}}}; + const auto const_data = std::unordered_map{{2, {element::i8, ov::Shape{4}, pads_end_data}}}; - this->input_shapes = ShapeVector{{3, 6, 5, SIZE_MAX}, {4}, {4}}; + this->input_shapes = StaticShapeVector{{3, 6, 5, SIZE_MAX}, {4}, {4}}; this->output_shapes = shape_inference(op.get(), this->input_shapes, const_data); @@ -112,12 +112,12 @@ TYPED_TEST_P(PadStaticShapeInference, pads_end_got_negative_value) { TYPED_TEST_P(PadStaticShapeInference, pads_begin_is_empty) { const auto data = std::make_shared(element::f32, PartialShape::dynamic()); const auto pads_begin = std::make_shared(element::u64, PartialShape::dynamic()); - const auto pads_end = Constant::create(element::i64, Shape{4}, {0, 0, 0, 0}); + const auto pads_end = Constant::create(element::i64, ov::Shape{4}, {0, 0, 0, 0}); const auto op = this->make_op(data, pads_begin, pads_end, op::PadMode::REFLECT); - const auto const_data = std::unordered_map{{1, {element::u64, Shape{0}}}}; + const auto const_data = std::unordered_map{{1, {element::u64, ov::Shape{0}}}}; - this->input_shapes = ShapeVector{{3, 6, 5, 2}, {0}, {4}}; + this->input_shapes = StaticShapeVector{{3, 6, 5, 2}, {0}, {4}}; OV_EXPECT_THROW(shape_inference(op.get(), this->input_shapes, const_data), NodeValidationFailure, @@ -126,13 +126,13 @@ TYPED_TEST_P(PadStaticShapeInference, pads_begin_is_empty) { TYPED_TEST_P(PadStaticShapeInference, pads_end_is_empty) { const auto data = std::make_shared(element::f32, PartialShape::dynamic()); - const auto pads_begin = Constant::create(element::i64, Shape{4}, {1, 1, 1, 1}); + const auto pads_begin = Constant::create(element::i64, ov::Shape{4}, {1, 1, 1, 1}); const auto pads_end = std::make_shared(element::i8, PartialShape::dynamic()); const auto op = this->make_op(data, pads_begin, pads_end, op::PadMode::REFLECT); - const auto const_data = std::unordered_map{{2, {element::i8, Shape{0}}}}; + const auto const_data = std::unordered_map{{2, {element::i8, ov::Shape{0}}}}; - this->input_shapes = ShapeVector{{3, 6, 5, 2}, {4}, {0}}; + 
this->input_shapes = StaticShapeVector{{3, 6, 5, 2}, {4}, {0}}; OV_EXPECT_THROW(shape_inference(op.get(), this->input_shapes, const_data), NodeValidationFailure, diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/prior_box_clustered_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/prior_box_clustered_shape_inference_test.cpp index a7da58358ee259..3fe1752c865341 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/prior_box_clustered_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/prior_box_clustered_shape_inference_test.cpp @@ -29,7 +29,7 @@ TEST_F(PriorBoxClusteredV0StaticShapeInferenceTest, default_ctor_no_args) { op->set_attrs(attrs); int32_t out_size[] = {2, 5}; - input_shapes = ShapeVector{{2}, {2}}; + input_shapes = StaticShapeVector{{2}, {2}}; output_shapes = shape_inference(op.get(), input_shapes, {{0, {element::i32, ov::Shape{2}, out_size}}}); @@ -45,7 +45,7 @@ TEST_F(PriorBoxClusteredV0StaticShapeInferenceTest, all_inputs_dynamic_rank) { int32_t output_size[] = {2, 5}; - input_shapes = ShapeVector{{2}, {2}}; + input_shapes = StaticShapeVector{{2}, {2}}; output_shapes = shape_inference(op.get(), input_shapes, {{0, {element::i32, ov::Shape{2}, output_size}}}); EXPECT_EQ(output_shapes.size(), 1); @@ -60,7 +60,7 @@ TEST_F(PriorBoxClusteredV0StaticShapeInferenceTest, all_inputs_static_rank) { int32_t output_size[] = {5, 2}; - input_shapes = ShapeVector{{2}, {2}}; + input_shapes = StaticShapeVector{{2}, {2}}; output_shapes = shape_inference(op.get(), input_shapes, {{0, {element::i32, ov::Shape{2}, output_size}}}); EXPECT_EQ(output_shapes.size(), 1); @@ -68,12 +68,12 @@ TEST_F(PriorBoxClusteredV0StaticShapeInferenceTest, all_inputs_static_rank) { } TEST_F(PriorBoxClusteredV0StaticShapeInferenceTest, out_size_constant) { - const auto out_size = op::v0::Constant::create(element::i32, Shape{2}, {4, 6}); + const auto out_size = op::v0::Constant::create(element::i32, ov::Shape{2}, {4, 6}); const auto img_size = std::make_shared(element::i32, PartialShape::dynamic(1)); op = make_op(out_size, img_size, attrs); - input_shapes = ShapeVector{{2}, {2}}; + input_shapes = StaticShapeVector{{2}, {2}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -81,12 +81,12 @@ TEST_F(PriorBoxClusteredV0StaticShapeInferenceTest, out_size_constant) { } TEST_F(PriorBoxClusteredV0StaticShapeInferenceTest, all_inputs_constants) { - const auto out_size = op::v0::Constant::create(element::i32, Shape{2}, {12, 16}); - const auto img_size = op::v0::Constant::create(element::i32, Shape{2}, {50, 50}); + const auto out_size = op::v0::Constant::create(element::i32, ov::Shape{2}, {12, 16}); + const auto img_size = op::v0::Constant::create(element::i32, ov::Shape{2}, {50, 50}); op = make_op(out_size, img_size, attrs); - input_shapes = ShapeVector{{2}, {2}}; + input_shapes = StaticShapeVector{{2}, {2}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -100,7 +100,7 @@ TEST_F(PriorBoxClusteredV0StaticShapeInferenceTest, invalid_number_of_elements_i op = make_op(out_size, img_size, attrs); int64_t output_size[] = {5, 2, 1}; - input_shapes = ShapeVector{{2}, {2}}; + input_shapes = StaticShapeVector{{2}, {2}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, {{0, {element::i64, ov::Shape{3}, output_size}}}), NodeValidationFailure, @@ -114,7 +114,7 @@ TEST_F(PriorBoxClusteredV0StaticShapeInferenceTest, invalid_input_ranks) { op = 
make_op(out_size, img_size, attrs); int64_t output_size[] = {5, 2, 1}; - input_shapes = ShapeVector{{2, 1}, {2}}; + input_shapes = StaticShapeVector{{2, 1}, {2}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, {{0, {element::i64, ov::Shape{3}, output_size}}}), NodeValidationFailure, diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/prior_box_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/prior_box_shape_inference_test.cpp index 263582507de76c..82a2ae1c3dc7a6 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/prior_box_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/prior_box_shape_inference_test.cpp @@ -30,7 +30,7 @@ TEST_F(PriorBoxV8StaticShapeInferenceTest, default_ctor_no_args) { op->set_attrs(attrs); int32_t out_size[] = {2, 5}; - input_shapes = ShapeVector{{2}, {2}}; + input_shapes = StaticShapeVector{{2}, {2}}; output_shapes = shape_inference(op.get(), input_shapes, {{0, {element::i32, ov::Shape{2}, out_size}}}); @@ -46,7 +46,7 @@ TEST_F(PriorBoxV8StaticShapeInferenceTest, all_inputs_dynamic_rank) { int32_t output_size[] = {2, 5}; - input_shapes = ShapeVector{{2}, {2}}; + input_shapes = StaticShapeVector{{2}, {2}}; output_shapes = shape_inference(op.get(), input_shapes, {{0, {element::i32, ov::Shape{2}, output_size}}}); EXPECT_EQ(output_shapes.size(), 1); @@ -61,7 +61,7 @@ TEST_F(PriorBoxV8StaticShapeInferenceTest, all_inputs_static_rank) { int32_t output_size[] = {5, 2}; - input_shapes = ShapeVector{{2}, {2}}; + input_shapes = StaticShapeVector{{2}, {2}}; output_shapes = shape_inference(op.get(), input_shapes, {{0, {element::i32, ov::Shape{2}, output_size}}}); EXPECT_EQ(output_shapes.size(), 1); @@ -69,12 +69,12 @@ TEST_F(PriorBoxV8StaticShapeInferenceTest, all_inputs_static_rank) { } TEST_F(PriorBoxV8StaticShapeInferenceTest, out_size_constant) { - const auto out_size = op::v0::Constant::create(element::i32, Shape{2}, {4, 6}); + const auto out_size = op::v0::Constant::create(element::i32, ov::Shape{2}, {4, 6}); const auto img_size = std::make_shared(element::i32, PartialShape::dynamic(1)); op = make_op(out_size, img_size, attrs); - input_shapes = ShapeVector{{2}, {2}}; + input_shapes = StaticShapeVector{{2}, {2}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -82,12 +82,12 @@ TEST_F(PriorBoxV8StaticShapeInferenceTest, out_size_constant) { } TEST_F(PriorBoxV8StaticShapeInferenceTest, all_inputs_constants) { - const auto out_size = op::v0::Constant::create(element::i32, Shape{2}, {12, 16}); - const auto img_size = op::v0::Constant::create(element::i32, Shape{2}, {50, 50}); + const auto out_size = op::v0::Constant::create(element::i32, ov::Shape{2}, {12, 16}); + const auto img_size = op::v0::Constant::create(element::i32, ov::Shape{2}, {50, 50}); op = make_op(out_size, img_size, attrs); - input_shapes = ShapeVector{{2}, {2}}; + input_shapes = StaticShapeVector{{2}, {2}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -101,7 +101,7 @@ TEST_F(PriorBoxV8StaticShapeInferenceTest, invalid_number_of_elements_in_out_siz op = make_op(out_size, img_size, attrs); int64_t output_size[] = {5, 2, 1}; - input_shapes = ShapeVector{{2}, {2}}; + input_shapes = StaticShapeVector{{2}, {2}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, {{0, {element::i64, ov::Shape{3}, output_size}}}), NodeValidationFailure, @@ -115,7 +115,7 @@ TEST_F(PriorBoxV8StaticShapeInferenceTest, 
invalid_input_ranks) { op = make_op(out_size, img_size, attrs); int64_t output_size[] = {5, 2, 1}; - input_shapes = ShapeVector{{2, 1}, {2}}; + input_shapes = StaticShapeVector{{2, 1}, {2}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, {{0, {element::i64, ov::Shape{3}, output_size}}}), NodeValidationFailure, diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/proposal_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/proposal_shape_inference_test.cpp index 85965fac741338..8b2c8ce579dcee 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/proposal_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/proposal_shape_inference_test.cpp @@ -39,7 +39,7 @@ TYPED_TEST_P(ProposalTest, default_ctor) { this->op = this->make_op(); this->op->set_attrs(this->make_attrs(10)); - this->input_shapes = ShapeVector{{2, 3, 10, 10}, {2, 6, 10, 10}, {3}}; + this->input_shapes = StaticShapeVector{{2, 3, 10, 10}, {2, 6, 10, 10}, {3}}; this->output_shapes = shape_inference(this->op.get(), this->input_shapes); EXPECT_EQ(this->output_shapes.size(), this->exp_out_size()); @@ -53,7 +53,7 @@ TYPED_TEST_P(ProposalTest, all_inputs_dynamic_rank) { this->op = this->make_op(class_probs, class_bbox_deltas, image_shape, this->make_attrs(4)); - this->input_shapes = ShapeVector{{2, 3, 10, 10}, {2, 6, 10, 10}, {3}}; + this->input_shapes = StaticShapeVector{{2, 3, 10, 10}, {2, 6, 10, 10}, {3}}; this->output_shapes = shape_inference(this->op.get(), this->input_shapes); EXPECT_EQ(this->output_shapes.size(), this->exp_out_size()); @@ -67,7 +67,7 @@ TYPED_TEST_P(ProposalTest, all_inputs_static_rank) { this->op = this->make_op(class_probs, class_bbox_deltas, image_shape, this->make_attrs(5)); - this->input_shapes = ShapeVector{{3, 4, 10, 10}, {3, 8, 10, 10}, {4}}; + this->input_shapes = StaticShapeVector{{3, 4, 10, 10}, {3, 8, 10, 10}, {4}}; this->output_shapes = shape_inference(this->op.get(), this->input_shapes); EXPECT_EQ(this->output_shapes.size(), this->exp_out_size()); @@ -81,7 +81,7 @@ TYPED_TEST_P(ProposalTest, batch_size_not_compatible) { this->op = this->make_op(class_probs, class_bbox_deltas, image_shape, this->make_attrs(5)); - this->input_shapes = ShapeVector{{3, 4, 10, 10}, {4, 8, 10, 10}, {3}}; + this->input_shapes = StaticShapeVector{{3, 4, 10, 10}, {4, 8, 10, 10}, {3}}; OV_EXPECT_THROW(shape_inference(this->op.get(), this->input_shapes), NodeValidationFailure, HasSubstr("Batch size inconsistent between class_probs")); @@ -94,7 +94,7 @@ TYPED_TEST_P(ProposalTest, image_shape_input_not_compatible_shape) { this->op = this->make_op(class_probs, class_bbox_deltas, image_shape, this->make_attrs(5)); - this->input_shapes = ShapeVector{{3, 4, 10, 10}, {3, 8, 10, 10}, {5}}; + this->input_shapes = StaticShapeVector{{3, 4, 10, 10}, {3, 8, 10, 10}, {5}}; OV_EXPECT_THROW(shape_inference(this->op.get(), this->input_shapes), NodeValidationFailure, HasSubstr("Image_shape must be 1-D tensor and has got 3 or 4 elements")); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/psroi_pooling_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/psroi_pooling_shape_inference_test.cpp index 63297c5cffd283..1154f26422d67e 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/psroi_pooling_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/psroi_pooling_shape_inference_test.cpp @@ -31,7 +31,7 @@ TEST_F(PSROIPoolingV0StaticShapeInferenceTest, 
default_ctor_avg_mode) { op->set_spatial_scale(scale); op->set_mode("average"); - input_shapes = ShapeVector{{1, 45, 10, 10}, {3, 5}}; + input_shapes = StaticShapeVector{{1, 45, 10, 10}, {3, 5}}; auto shape_infer = make_shape_inference(op); output_shapes = shape_inference(op.get(), input_shapes); @@ -48,7 +48,7 @@ TEST_F(PSROIPoolingV0StaticShapeInferenceTest, default_ctor_bilinear_mode) { op->set_spatial_scale(scale); op->set_mode("bilinear"); - input_shapes = ShapeVector{{1, 75, 10, 10}, {2, 5}}; + input_shapes = StaticShapeVector{{1, 75, 10, 10}, {2, 5}}; auto shape_infer = make_shape_inference(op); output_shapes = shape_inference(op.get(), input_shapes); @@ -62,7 +62,7 @@ TEST_F(PSROIPoolingV0StaticShapeInferenceTest, inputs_dynamic_rank) { op = make_op(feat, rois, 4, group, scale, 0, 0, "average"); - input_shapes = ShapeVector{{2, 36, 100, 100}, {10, 5}}; + input_shapes = StaticShapeVector{{2, 36, 100, 100}, {10, 5}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -75,7 +75,7 @@ TEST_F(PSROIPoolingV0StaticShapeInferenceTest, inputs_static_rank) { op = make_op(feat, rois, 2, 1, scale, bins_x, bins_y, "bilinear"); - input_shapes = ShapeVector{{2, 24, 20, 100}, {1, 5}}; + input_shapes = StaticShapeVector{{2, 24, 20, 100}, {1, 5}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -88,7 +88,7 @@ TEST_F(PSROIPoolingV0StaticShapeInferenceTest, invalid_rois_batch_size) { op = make_op(feat, rois, 2, 1, scale, bins_x, bins_y, "bilinear"); - input_shapes = ShapeVector{{2, 24, 20, 100}, {1, 6}}; + input_shapes = StaticShapeVector{{2, 24, 20, 100}, {1, 6}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes), NodeValidationFailure, diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/random_uniform_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/random_uniform_shape_inference_test.cpp index 3eb47d417ebe80..ef9c0c20df8b62 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/random_uniform_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/random_uniform_shape_inference_test.cpp @@ -32,7 +32,7 @@ TEST_F(RandomUniformV8StaticShapeInferenceTest, default_ctor_no_args) { {1, {element::i32, ov::Shape{1}, &min}}, {2, {element::i32, ov::Shape{}, &max}}}; - input_shapes = ShapeVector{{4}, {1}, {}}; + input_shapes = StaticShapeVector{{4}, {1}, {}}; output_shapes = shape_inference(op.get(), input_shapes, const_data); EXPECT_EQ(output_shapes.size(), 1); @@ -53,7 +53,7 @@ TEST_F(RandomUniformV8StaticShapeInferenceTest, all_inputs_dynamic_rank) { {1, {element::i64, ov::Shape{}, &min}}, {2, {element::i64, ov::Shape{}, &max}}}; - input_shapes = ShapeVector{{5}, {}, {}}; + input_shapes = StaticShapeVector{{5}, {}, {}}; output_shapes = shape_inference(op.get(), input_shapes, const_data); EXPECT_EQ(output_shapes.size(), 1); @@ -74,7 +74,7 @@ TEST_F(RandomUniformV8StaticShapeInferenceTest, all_inputs_static_rank) { {1, {element::f32, ov::Shape{1}, &min}}, {2, {element::f32, ov::Shape{1}, &max}}}; - input_shapes = ShapeVector{{3}, {}, {}}; + input_shapes = StaticShapeVector{{3}, {}, {}}; output_shapes = shape_inference(op.get(), input_shapes, const_data); EXPECT_EQ(output_shapes.size(), 1); @@ -82,13 +82,13 @@ TEST_F(RandomUniformV8StaticShapeInferenceTest, all_inputs_static_rank) { } TEST_F(RandomUniformV8StaticShapeInferenceTest, all_inputs_as_const) { - const auto out_shape = op::v0::Constant::create(element::i32, Shape{6}, 
{2, 1, 3, 5, 1, 7}); - const auto min_val = op::v0::Constant::create(element::f16, Shape{}, {2}); - const auto max_val = op::v0::Constant::create(element::f16, Shape{1}, {16}); + const auto out_shape = op::v0::Constant::create(element::i32, ov::Shape{6}, {2, 1, 3, 5, 1, 7}); + const auto min_val = op::v0::Constant::create(element::f16, ov::Shape{}, {2}); + const auto max_val = op::v0::Constant::create(element::f16, ov::Shape{1}, {16}); op = make_op(out_shape, min_val, max_val, element::f16, global_seed, op_seed); - input_shapes = ShapeVector{{6}, {}, {1}}; + input_shapes = StaticShapeVector{{6}, {}, {1}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -97,7 +97,7 @@ TEST_F(RandomUniformV8StaticShapeInferenceTest, all_inputs_as_const) { TEST_F(RandomUniformV8StaticShapeInferenceTest, some_inputs_are_const_some_dynamic) { const auto out_shape = std::make_shared(element::i32, PartialShape::dynamic()); - const auto min_val = op::v0::Constant::create(element::f32, Shape{}, {2}); + const auto min_val = op::v0::Constant::create(element::f32, ov::Shape{}, {2}); const auto max_val = std::make_shared(element::f32, PartialShape::dynamic()); op = make_op(out_shape, min_val, max_val, element::f32, global_seed, op_seed); @@ -108,7 +108,7 @@ TEST_F(RandomUniformV8StaticShapeInferenceTest, some_inputs_are_const_some_dynam const auto const_data = std::unordered_map{{0, {element::i32, ov::Shape{3}, shape}}, {2, {element::f32, ov::Shape{1}, &max}}}; - input_shapes = ShapeVector{{3}, {}, {}}; + input_shapes = StaticShapeVector{{3}, {}, {}}; output_shapes = shape_inference(op.get(), input_shapes, const_data); EXPECT_EQ(output_shapes.size(), 1); @@ -117,7 +117,7 @@ TEST_F(RandomUniformV8StaticShapeInferenceTest, some_inputs_are_const_some_dynam TEST_F(RandomUniformV8StaticShapeInferenceTest, min_not_lt_max) { const auto out_shape = std::make_shared(element::i32, PartialShape::dynamic()); - const auto min_val = op::v0::Constant::create(element::i64, Shape{}, {2}); + const auto min_val = op::v0::Constant::create(element::i64, ov::Shape{}, {2}); const auto max_val = std::make_shared(element::i64, PartialShape::dynamic()); op = make_op(out_shape, min_val, max_val, element::i64, global_seed, op_seed); @@ -128,7 +128,7 @@ TEST_F(RandomUniformV8StaticShapeInferenceTest, min_not_lt_max) { const auto const_data = std::unordered_map{{0, {element::i32, ov::Shape{3}, shape}}, {2, {element::i64, ov::Shape{1}, &max}}}; - input_shapes = ShapeVector{{3}, {}, {}}; + input_shapes = StaticShapeVector{{3}, {}, {}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, const_data), NodeValidationFailure, HasSubstr("Min value must be less than max value. 
Got min value:")); @@ -136,7 +136,7 @@ TEST_F(RandomUniformV8StaticShapeInferenceTest, min_not_lt_max) { TEST_F(RandomUniformV8StaticShapeInferenceTest, out_shape_input_not_rank_1) { const auto out_shape = std::make_shared(element::i32, PartialShape::dynamic()); - const auto min_val = op::v0::Constant::create(element::i64, Shape{}, {2}); + const auto min_val = op::v0::Constant::create(element::i64, ov::Shape{}, {2}); const auto max_val = std::make_shared(element::i64, PartialShape::dynamic()); op = make_op(out_shape, min_val, max_val, element::i64, global_seed, op_seed); @@ -147,7 +147,7 @@ TEST_F(RandomUniformV8StaticShapeInferenceTest, out_shape_input_not_rank_1) { const auto const_data = std::unordered_map{{0, {element::i32, ov::Shape{3}, shape}}, {2, {element::i64, ov::Shape{1}, &max}}}; - input_shapes = ShapeVector{{3, 1}, {}, {}}; + input_shapes = StaticShapeVector{{3, 1}, {}, {}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, const_data), NodeValidationFailure, HasSubstr("The rank of the tensor defining output shape must be equal to 1")); @@ -160,7 +160,7 @@ TEST_F(RandomUniformV8StaticShapeInferenceTest, all_inputs_dynamic_no_const_data op = make_op(out_shape, min_val, max_val, element::i64, global_seed, op_seed); - input_shapes = ShapeVector{{3}, {}, {}}; + input_shapes = StaticShapeVector{{3}, {}, {}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes), NodeValidationFailure, HasSubstr("Static shape inference lacks constant data on port")); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/range_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/range_shape_inference_test.cpp index 27eca3d57f7f8f..b5125a761cac8c 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/range_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/range_shape_inference_test.cpp @@ -19,27 +19,27 @@ TEST(StaticShapeInferenceTest, Rangev4_i32) { auto range = make_shared(start, stop, step, element::i32); int32_t start_v = 2, stop_v = 0, step_v = -2; - auto const_data = std::unordered_map{{0, {element::i32, Shape{}, &start_v}}, - {1, {element::i32, Shape{}, &stop_v}}, - {2, {element::i32, Shape{}, &step_v}}}; + auto const_data = std::unordered_map{{0, {element::i32, ov::Shape{}, &start_v}}, + {1, {element::i32, ov::Shape{}, &stop_v}}, + {2, {element::i32, ov::Shape{}, &step_v}}}; - auto output_shapes = shape_inference(range.get(), ShapeVector{{}, {}, {}}, const_data); + auto output_shapes = shape_inference(range.get(), StaticShapeVector{{}, {}, {}}, const_data); EXPECT_THAT(output_shapes, ElementsAre(StaticShape{1})); step_v = -1; - output_shapes = shape_inference(range.get(), ShapeVector{{}, {}, {}}, const_data); + output_shapes = shape_inference(range.get(), StaticShapeVector{{}, {}, {}}, const_data); EXPECT_THAT(output_shapes, ElementsAre(StaticShape{2})); start_v = -19, stop_v = 19, step_v = 1; - output_shapes = shape_inference(range.get(), ShapeVector{{}, {}, {}}, const_data); + output_shapes = shape_inference(range.get(), StaticShapeVector{{}, {}, {}}, const_data); EXPECT_THAT(output_shapes, ElementsAre(StaticShape{38})); step_v = 3; - output_shapes = shape_inference(range.get(), ShapeVector{{}, {}, {}}, const_data); + output_shapes = shape_inference(range.get(), StaticShapeVector{{}, {}, {}}, const_data); EXPECT_THAT(output_shapes, ElementsAre(StaticShape{13})); start_v = 20, stop_v = -19, step_v = 1; - output_shapes = shape_inference(range.get(), ShapeVector{{}, {}, {}}, const_data); + 
output_shapes = shape_inference(range.get(), StaticShapeVector{{}, {}, {}}, const_data); EXPECT_THAT(output_shapes, ElementsAre(StaticShape{0})); } @@ -50,18 +50,18 @@ TEST(StaticShapeInferenceTest, Rangev4_f32) { auto range = make_shared(start, stop, step, element::f32); float start_v = 0.f, stop_v = 1.f, step_v = .25f; - auto const_data = std::unordered_map{{0, {element::f32, Shape{}, &start_v}}, - {1, {element::f32, Shape{}, &stop_v}}, - {2, {element::f32, Shape{}, &step_v}}}; + auto const_data = std::unordered_map{{0, {element::f32, ov::Shape{}, &start_v}}, + {1, {element::f32, ov::Shape{}, &stop_v}}, + {2, {element::f32, ov::Shape{}, &step_v}}}; - auto output_shapes = shape_inference(range.get(), ShapeVector{{}, {}, {}}, const_data); + auto output_shapes = shape_inference(range.get(), StaticShapeVector{{}, {}, {}}, const_data); EXPECT_THAT(output_shapes, ElementsAre(StaticShape{4})); start_v = -1.f; - output_shapes = shape_inference(range.get(), ShapeVector{{}, {}, {}}, const_data); + output_shapes = shape_inference(range.get(), StaticShapeVector{{}, {}, {}}, const_data); EXPECT_THAT(output_shapes, ElementsAre(StaticShape{8})); stop_v = .875f; - output_shapes = shape_inference(range.get(), ShapeVector{{}, {}, {}}, const_data); + output_shapes = shape_inference(range.get(), StaticShapeVector{{}, {}, {}}, const_data); EXPECT_THAT(output_shapes, ElementsAre(StaticShape{8})); } diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/reduce_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/reduce_shape_inference_test.cpp index 8effd0939b9688..d54d730f417afa 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/reduce_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/reduce_shape_inference_test.cpp @@ -16,7 +16,7 @@ template class ReduceStaticShapeInferenceTest : public OpStaticShapeInferenceTest { protected: void SetUp() override { - this->output_shapes = ShapeVector(1); + this->output_shapes = StaticShapeVector(1); } }; @@ -25,10 +25,10 @@ TYPED_TEST_SUITE_P(ReduceStaticShapeInferenceTest); TYPED_TEST_P(ReduceStaticShapeInferenceTest, default_ctor) { this->op = this->make_op(); this->op->set_keep_dims(true); - this->input_shapes = ShapeVector{{1, 6, 7, 8, 4}, {3}}; + this->input_shapes = StaticShapeVector{{1, 6, 7, 8, 4}, {3}}; int32_t axes_val[] = {0, 1, 3}; - const auto constant_data = std::unordered_map{{1, {element::i32, Shape{3}, axes_val}}}; + const auto constant_data = std::unordered_map{{1, {element::i32, ov::Shape{3}, axes_val}}}; this->output_shapes = shape_inference(this->op.get(), this->input_shapes, constant_data); EXPECT_EQ(this->output_shapes.size(), 1); @@ -37,7 +37,7 @@ TYPED_TEST_P(ReduceStaticShapeInferenceTest, default_ctor) { TYPED_TEST_P(ReduceStaticShapeInferenceTest, axes_constant) { const auto data = std::make_shared(element::dynamic, PartialShape{-1, -1, -1, -1}); - const auto axes = std::make_shared(element::i32, Shape{2}, std::vector{1, 3}); + const auto axes = std::make_shared(element::i32, ov::Shape{2}, std::vector{1, 3}); this->op = this->make_op(data, axes, false); this->input_shapes = {StaticShape{3, 6, 5, 8}, StaticShape{2}}; @@ -50,13 +50,13 @@ TYPED_TEST_P(ReduceStaticShapeInferenceTest, axes_constant) { TYPED_TEST_P(ReduceStaticShapeInferenceTest, axes_param) { const auto data = std::make_shared(element::dynamic, PartialShape{-1, -1, -1, -1}); - const auto axes = std::make_shared(element::i32, Shape{2}); + const auto axes = std::make_shared(element::i32, 
ov::Shape{2}); this->op = this->make_op(data, axes, false); this->input_shapes = {StaticShape{3, 6, 5, 8}, StaticShape{2}}; int32_t axes_val[] = {1, 3}; - const auto constant_data = std::unordered_map{{1, {element::i32, Shape{2}, axes_val}}}; + const auto constant_data = std::unordered_map{{1, {element::i32, ov::Shape{2}, axes_val}}}; this->output_shapes = shape_inference(this->op.get(), this->input_shapes, constant_data); EXPECT_EQ(this->output_shapes.size(), 1); @@ -65,7 +65,7 @@ TYPED_TEST_P(ReduceStaticShapeInferenceTest, axes_param) { TYPED_TEST_P(ReduceStaticShapeInferenceTest, axes_constant_keep_dims) { const auto data = std::make_shared(element::dynamic, PartialShape{-1, -1, -1, -1}); - const auto axes = std::make_shared(element::i32, Shape{2}, std::vector{1, 3}); + const auto axes = std::make_shared(element::i32, ov::Shape{2}, std::vector{1, 3}); this->op = this->make_op(data, axes, true); this->input_shapes = {StaticShape{3, 6, 5, 8}, StaticShape{2}}; @@ -78,13 +78,13 @@ TYPED_TEST_P(ReduceStaticShapeInferenceTest, axes_constant_keep_dims) { TYPED_TEST_P(ReduceStaticShapeInferenceTest, axes_param_keep_dims) { const auto data = std::make_shared(element::dynamic, PartialShape{-1, -1, -1, -1}); - const auto axes = std::make_shared(element::i32, Shape{2}); + const auto axes = std::make_shared(element::i32, ov::Shape{2}); this->op = this->make_op(data, axes, true); this->input_shapes = {StaticShape{3, 6, 5, 8}, StaticShape{2}}; int32_t axes_val[] = {1, 3}; - const auto constant_data = std::unordered_map{{1, {element::i32, Shape{2}, axes_val}}}; + const auto constant_data = std::unordered_map{{1, {element::i32, ov::Shape{2}, axes_val}}}; this->output_shapes = shape_inference(this->op.get(), this->input_shapes, constant_data); EXPECT_EQ(this->output_shapes.size(), 1); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/region_yolo_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/region_yolo_shape_inference_test.cpp index 8bc9e59b826c25..552427dc54f554 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/region_yolo_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/region_yolo_shape_inference_test.cpp @@ -21,7 +21,7 @@ TEST_F(StaticShapeRegionYoloTest, default_ctor_do_soft_max_no_args) { op->set_axis(-2); op->set_end_axis(3); - input_shapes = ShapeVector{{10, 8, 12, 6}}; + input_shapes = StaticShapeVector{{10, 8, 12, 6}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -32,7 +32,7 @@ TEST_F(StaticShapeRegionYoloTest, data_input_is_dynamic_rank) { const auto data = std::make_shared(element::f32, ov::PartialShape::dynamic()); op = make_op(data, 0, 0, 0, true, std::vector(), 1, 3); - input_shapes = ShapeVector{{2, 2, 3, 4}}; + input_shapes = StaticShapeVector{{2, 2, 3, 4}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -43,7 +43,7 @@ TEST_F(StaticShapeRegionYoloTest, data_input_is_static_rank) { const auto data = std::make_shared(element::f32, ov::PartialShape::dynamic(4)); op = make_op(data, 5, 4, 20, false, std::vector{0, 1}, 1, 3); - input_shapes = ShapeVector{{2, 5, 6, 7}}; + input_shapes = StaticShapeVector{{2, 5, 6, 7}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -54,7 +54,7 @@ TEST_F(StaticShapeRegionYoloTest, data_shape_not_compatible_rank_4) { const auto data = std::make_shared(element::f32, PartialShape::dynamic()); op = make_op(data, 5, 
4, 20, false, std::vector{0, 1}, 1, 3); - OV_EXPECT_THROW(shape_inference(op.get(), ShapeVector({{2, 20, 12, 24, 1}})), + OV_EXPECT_THROW(shape_inference(op.get(), StaticShapeVector({{2, 20, 12, 24, 1}})), NodeValidationFailure, HasSubstr("Input must be a tensor of rank 4, but got")); } diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/reorg_yolo_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/reorg_yolo_shape_inference_test.cpp index 2599aad4b38738..58ebcfff8bc608 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/reorg_yolo_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/reorg_yolo_shape_inference_test.cpp @@ -19,7 +19,7 @@ TEST_F(StaticShapeReorgYoloTest, default_ctor_no_args) { op = make_op(); op->set_strides(3); - input_shapes = ShapeVector{{2, 9, 12, 6}}; + input_shapes = StaticShapeVector{{2, 9, 12, 6}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -30,7 +30,7 @@ TEST_F(StaticShapeReorgYoloTest, data_input_is_dynamic_rank) { const auto data = std::make_shared(element::f32, ov::PartialShape::dynamic()); op = make_op(data, 2); - input_shapes = ShapeVector{{2, 12, 12, 24}}; + input_shapes = StaticShapeVector{{2, 12, 12, 24}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -41,7 +41,7 @@ TEST_F(StaticShapeReorgYoloTest, data_input_is_static_rank) { const auto data = std::make_shared(element::f32, ov::PartialShape::dynamic(4)); op = make_op(data, 2); - input_shapes = ShapeVector{{2, 20, 12, 24}}; + input_shapes = StaticShapeVector{{2, 20, 12, 24}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -52,7 +52,7 @@ TEST_F(StaticShapeReorgYoloTest, data_shape_not_compatible_rank_4) { const auto data = std::make_shared(element::f32, PartialShape::dynamic()); op = make_op(data, 2); - OV_EXPECT_THROW(shape_inference(op.get(), ShapeVector({{2, 20, 12, 24, 1}})), + OV_EXPECT_THROW(shape_inference(op.get(), StaticShapeVector({{2, 20, 12, 24, 1}})), NodeValidationFailure, HasSubstr("[N, C, H, W] input shape is required")); } @@ -61,7 +61,7 @@ TEST_F(StaticShapeReorgYoloTest, h_dim_not_div_by_stride) { const auto data = std::make_shared(element::f32, PartialShape::dynamic()); op = make_op(data, 2); - OV_EXPECT_THROW(shape_inference(op.get(), ShapeVector{{2, 20, 11, 24}}), + OV_EXPECT_THROW(shape_inference(op.get(), StaticShapeVector{{2, 20, 11, 24}}), NodeValidationFailure, HasSubstr("H and W should be divisible by stride")); } diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/reshape_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/reshape_shape_inference_test.cpp index e193553796bc4d..2f16c8a14220ad 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/reshape_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/reshape_shape_inference_test.cpp @@ -20,7 +20,7 @@ TEST_F(ReshapeV1StaticShapeInferenceTest, default_ctor_no_args) { int64_t shape_pattern[] = {2, 4, 0, 1, -1}; auto const_data = std::unordered_map{{1, Tensor(element::i64, ov::Shape{5}, shape_pattern)}}; - input_shapes = ShapeVector{{2, 9, 12, 8}, {5}}; + input_shapes = StaticShapeVector{{2, 9, 12, 8}, {5}}; output_shapes = shape_inference(op.get(), input_shapes, const_data); @@ -36,7 +36,7 @@ TEST_F(ReshapeV1StaticShapeInferenceTest, all_inputs_are_dynamic_rank) { const auto pattern = 
std::make_shared(element::i64, PartialShape::dynamic()); op = make_op(data, pattern, true); - input_shapes = ShapeVector{{9, 24, 8}, {5}}; + input_shapes = StaticShapeVector{{9, 24, 8}, {5}}; output_shapes = shape_inference(op.get(), input_shapes, const_data); EXPECT_EQ(output_shapes.size(), 1); @@ -51,7 +51,7 @@ TEST_F(ReshapeV1StaticShapeInferenceTest, all_inputs_are_static_rank) { const auto pattern = std::make_shared(element::i64, PartialShape::dynamic(1)); op = make_op(data, pattern, false); - input_shapes = ShapeVector{{9, 24, 8}, {4}}; + input_shapes = StaticShapeVector{{9, 24, 8}, {4}}; output_shapes = shape_inference(op.get(), input_shapes, const_data); EXPECT_EQ(output_shapes.size(), 1); @@ -64,7 +64,7 @@ TEST_F(ReshapeV1StaticShapeInferenceTest, pattern_with_special_values) { op = make_op(data, pattern, true); - input_shapes = ShapeVector{{3, 6, 5, 5}, {2}}; + input_shapes = StaticShapeVector{{3, 6, 5, 5}, {2}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.front(), StaticShape({3, 150})); @@ -76,7 +76,7 @@ TEST_F(ReshapeV1StaticShapeInferenceTest, reshape_to_empty_volume) { op = make_op(data, pattern, false); - input_shapes = ShapeVector{{0, 2, 2}, {2}}; + input_shapes = StaticShapeVector{{0, 2, 2}, {2}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.front(), StaticShape({0, 4})); @@ -87,7 +87,7 @@ TEST_F(ReshapeV1StaticShapeInferenceTest, reshape_pattern_not_defined) { const auto pattern = std::make_shared(element::i64, PartialShape::dynamic()); op = make_op(data, pattern, true); - input_shapes = ShapeVector{{9, 24, 8}, {5}}; + input_shapes = StaticShapeVector{{9, 24, 8}, {5}}; OV_EXPECT_THROW(std::ignore = shape_inference(op.get(), input_shapes), NodeValidationFailure, HasSubstr("Static shape inference lacks constant data on port 1")); @@ -98,7 +98,7 @@ TEST_F(ReshapeV1StaticShapeInferenceTest, shape_pattern_as_constant) { const auto pattern = op::v0::Constant::create(element::i32, ov::Shape{3}, {2, 4, 1}); op = make_op(data, pattern, false); - input_shapes = ShapeVector{{9, 24, 8}, {4}}; + input_shapes = StaticShapeVector{{9, 24, 8}, {4}}; OV_EXPECT_THROW(std::ignore = shape_inference(op.get(), input_shapes), NodeValidationFailure, HasSubstr("is incompatible with input shape")); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/reverse_sequence_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/reverse_sequence_shape_inference_test.cpp index 2ad1978785c427..0267c201000406 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/reverse_sequence_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/reverse_sequence_shape_inference_test.cpp @@ -26,7 +26,7 @@ class ReverseSequenceV0StaticShapeInferenceTest : public OpStaticShapeInferenceT TEST_F(ReverseSequenceV0StaticShapeInferenceTest, default_batch_seq_axes) { auto op = make_op(data, seq_lengths); - input_shapes = ShapeVector{{4, 3, 2}, {4}}; + input_shapes = StaticShapeVector{{4, 3, 2}, {4}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes[0], StaticShape({4, 3, 2})); @@ -35,7 +35,7 @@ TEST_F(ReverseSequenceV0StaticShapeInferenceTest, default_batch_seq_axes) { TEST_F(ReverseSequenceV0StaticShapeInferenceTest, set_batch_seq_axes) { auto op = make_op(data, seq_lengths, -1, 1); - input_shapes = ShapeVector{{4, 3, 2}, {2}}; + input_shapes = StaticShapeVector{{4, 3, 2}, {2}}; output_shapes = shape_inference(op.get(), input_shapes); 
EXPECT_EQ(output_shapes[0], StaticShape({4, 3, 2})); @@ -44,14 +44,14 @@ TEST_F(ReverseSequenceV0StaticShapeInferenceTest, set_batch_seq_axes) { TEST_F(ReverseSequenceV0StaticShapeInferenceTest, invalid_input_shapes_count) { auto op = make_op(data, seq_lengths); - input_shapes = ShapeVector{{1, 2, 4}}; + input_shapes = StaticShapeVector{{1, 2, 4}}; EXPECT_THROW(shape_inference(op.get(), input_shapes), NodeValidationFailure); } TEST_F(ReverseSequenceV0StaticShapeInferenceTest, invalid_data_shape_rank) { auto op = make_op(data, seq_lengths); - input_shapes = ShapeVector{{4}, {4}}; + input_shapes = StaticShapeVector{{4}, {4}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes), NodeValidationFailure, HasSubstr("Data input rank should be equal or greater than 2. Got: ")); @@ -60,7 +60,7 @@ TEST_F(ReverseSequenceV0StaticShapeInferenceTest, invalid_data_shape_rank) { TEST_F(ReverseSequenceV0StaticShapeInferenceTest, invalid_sequence_shape_rank) { auto op = make_op(data, seq_lengths); - input_shapes = ShapeVector{{4, 5, 6}, {2, 2}}; + input_shapes = StaticShapeVector{{4, 5, 6}, {2, 2}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes), NodeValidationFailure, HasSubstr("Sequence lengths rank must be equal to 1. Got: ")); @@ -69,7 +69,7 @@ TEST_F(ReverseSequenceV0StaticShapeInferenceTest, invalid_sequence_shape_rank) { TEST_F(ReverseSequenceV0StaticShapeInferenceTest, default_ctor) { auto op = make_op(); - input_shapes = ShapeVector{{11, 2, 3}, {11}}; + input_shapes = StaticShapeVector{{11, 2, 3}, {11}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes[0], StaticShape({11, 2, 3})); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/reverse_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/reverse_shape_inference_test.cpp index 34b010cf353e8b..9809912f2f3aa0 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/reverse_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/reverse_shape_inference_test.cpp @@ -25,9 +25,9 @@ class ReverseV1StaticShapeInferenceTest : public OpStaticShapeInferenceTest{{1, {element::i8, Shape{3}, axes_val}}}; + auto const_data = std::unordered_map{{1, {element::i8, ov::Shape{3}, axes_val}}}; const auto output_shapes = shape_inference(op.get(), input_shapes, const_data); EXPECT_EQ(output_shapes[0], StaticShape({4, 3, 2, 4})); } TEST_F(ReverseV1StaticShapeInferenceTest, axes_mask_as_constant) { - auto op = - make_op(data, Constant::create(element::boolean, Shape{4}, {true, false, false, true}), Reverse::Mode::MASK); + auto op = make_op(data, + Constant::create(element::boolean, ov::Shape{4}, {true, false, false, true}), + Reverse::Mode::MASK); - input_shapes = ShapeVector{{4, 3, 2, 4}, {4}}; + input_shapes = StaticShapeVector{{4, 3, 2, 4}, {4}}; output_shapes = shape_inference(op.get(), input_shapes); @@ -59,18 +60,19 @@ TEST_F(ReverseV1StaticShapeInferenceTest, axes_mask_in_constant_data) { auto op = make_op(data, std::make_shared(element::boolean, PartialShape::dynamic()), Reverse::Mode::MASK); - input_shapes = ShapeVector{{4, 3, 2, 4}, {4}}; + input_shapes = StaticShapeVector{{4, 3, 2, 4}, {4}}; bool axes_val[] = {true, true, false, false}; - auto const_data = std::unordered_map{{1, {element::boolean, Shape{4}, axes_val}}}; + auto const_data = std::unordered_map{{1, {element::boolean, ov::Shape{4}, axes_val}}}; const auto output_shapes = shape_inference(op.get(), input_shapes, const_data); EXPECT_EQ(output_shapes[0], StaticShape({4, 3, 2, 
4})); } TEST_F(ReverseV1StaticShapeInferenceTest, invalid_axes_mask_length) { - auto op = make_op(data, Constant::create(element::boolean, Shape{3}, {false, false, true}), Reverse::Mode::MASK); + auto op = + make_op(data, Constant::create(element::boolean, ov::Shape{3}, {false, false, true}), Reverse::Mode::MASK); - input_shapes = ShapeVector{{1, 2, 4, 3}, {3}}; + input_shapes = StaticShapeVector{{1, 2, 4, 3}, {3}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes), NodeValidationFailure, HasSubstr("The number of elements in the reversed_axes tensor (3) must match the input data tensor " @@ -78,9 +80,9 @@ TEST_F(ReverseV1StaticShapeInferenceTest, invalid_axes_mask_length) { } TEST_F(ReverseV1StaticShapeInferenceTest, axes_index_out_of_data_rank) { - auto op = make_op(data, Constant::create(element::u8, Shape{3}, {0, 20, 3}), Reverse::Mode::INDEX); + auto op = make_op(data, Constant::create(element::u8, ov::Shape{3}, {0, 20, 3}), Reverse::Mode::INDEX); - input_shapes = ShapeVector{{1, 2, 4, 3}, {3}}; + input_shapes = StaticShapeVector{{1, 2, 4, 3}, {3}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes), NodeValidationFailure, HasSubstr("Some of the provided axes (AxisSet{0, 3, 20}) are out of bounds (input rank: 4)")); @@ -90,10 +92,10 @@ TEST_F(ReverseV1StaticShapeInferenceTest, default_ctor) { auto op = make_op(); op->set_mode(Reverse::Mode::INDEX); - input_shapes = ShapeVector{{11, 2, 3}, {3}}; + input_shapes = StaticShapeVector{{11, 2, 3}, {3}}; int64_t axes_val[] = {-1, 2, 0}; - auto const_data = std::unordered_map{{1, {element::i64, Shape{3}, axes_val}}}; + auto const_data = std::unordered_map{{1, {element::i64, ov::Shape{3}, axes_val}}}; const auto output_shapes = shape_inference(op.get(), input_shapes, const_data); EXPECT_EQ(output_shapes[0], StaticShape({11, 2, 3})); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/rms_norm_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/rms_norm_shape_inference_test.cpp index 7dd2ff086252f9..cb95ef33de8b8e 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/rms_norm_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/rms_norm_shape_inference_test.cpp @@ -24,7 +24,7 @@ TEST(StaticShapeInferenceTest, RMSNormStaticShapeInferenceTestDefaultCtor) { std::vector static_input_shapes = {StaticShape{2, 3, 8, 6}, StaticShape{1}, StaticShape{1}}; int32_t axis_val = -1; - const auto const_data = std::unordered_map{{1, {element::i32, Shape{1}, &axis_val}}}; + const auto const_data = std::unordered_map{{1, {element::i32, ov::Shape{1}, &axis_val}}}; const auto static_output_shapes = shape_inference(op.get(), static_input_shapes, const_data); EXPECT_EQ(static_output_shapes[0], StaticShape({2, 3, 8, 6})); } @@ -38,7 +38,7 @@ TEST(StaticShapeInferenceTest, RMSNormStaticShapeInferenceTest2ins) { std::vector static_input_shapes = {StaticShape{2, 3, 8, 6}, StaticShape{1}}; int32_t axis_val = -1; - const auto const_data = std::unordered_map{{1, {element::i32, Shape{1}, &axis_val}}}; + const auto const_data = std::unordered_map{{1, {element::i32, ov::Shape{1}, &axis_val}}}; const auto static_output_shapes = shape_inference(op.get(), static_input_shapes, const_data); EXPECT_EQ(static_output_shapes[0], StaticShape({2, 3, 8, 6})); } @@ -53,7 +53,7 @@ TEST(StaticShapeInferenceTest, RMSNormStaticShapeInferenceTest3ins) { std::vector static_input_shapes = {StaticShape{2, 3, 8, 6}, StaticShape{1}, StaticShape{1}}; int32_t axis_val = -1; - const auto const_data = 
std::unordered_map{{1, {element::i32, Shape{1}, &axis_val}}}; + const auto const_data = std::unordered_map{{1, {element::i32, ov::Shape{1}, &axis_val}}}; const auto static_output_shapes = shape_inference(op.get(), static_input_shapes, const_data); EXPECT_EQ(static_output_shapes[0], StaticShape({2, 3, 8, 6})); } @@ -67,7 +67,7 @@ TEST(StaticShapeInferenceTest, RMSNormIncorrectAxisValParam) { std::vector static_input_shapes = {StaticShape{2, 3, 8, 6}, StaticShape{1}}; int32_t axis_val = 5; - const auto const_data = std::unordered_map{{1, {element::i32, Shape{1}, &axis_val}}}; + const auto const_data = std::unordered_map{{1, {element::i32, ov::Shape{1}, &axis_val}}}; OV_EXPECT_THROW(shape_inference(op.get(), static_input_shapes, const_data), NodeValidationFailure, @@ -76,7 +76,7 @@ TEST(StaticShapeInferenceTest, RMSNormIncorrectAxisValParam) { TEST(StaticShapeInferenceTest, RMSNormIncorrectAxisValConst) { const auto data = std::make_shared(element::f32, PartialShape::dynamic()); - const auto axes = std::make_shared(element::i32, Shape{}, 5); + const auto axes = std::make_shared(element::i32, ov::Shape{}, 5); const auto eps = 1e-5f; const auto op = std::make_shared(data, axes, eps); @@ -97,7 +97,7 @@ TEST(StaticShapeInferenceTest, RMSNormIncorrectAxisShapeDim) { std::vector static_input_shapes = {StaticShape{2, 3, 8, 6}, StaticShape{5}}; int32_t axis_val = 5; - const auto const_data = std::unordered_map{{1, {element::i32, Shape{1}, &axis_val}}}; + const auto const_data = std::unordered_map{{1, {element::i32, ov::Shape{1}, &axis_val}}}; OV_EXPECT_THROW(shape_inference(op.get(), static_input_shapes, const_data), NodeValidationFailure, @@ -113,7 +113,7 @@ TEST(StaticShapeInferenceTest, RMSNormIncorrectAxisShapeRank) { std::vector static_input_shapes = {StaticShape{2, 3, 8, 6}, StaticShape{1, 5}}; int32_t axis_val = 5; - const auto const_data = std::unordered_map{{1, {element::i32, Shape{1}, &axis_val}}}; + const auto const_data = std::unordered_map{{1, {element::i32, ov::Shape{1}, &axis_val}}}; OV_EXPECT_THROW(shape_inference(op.get(), static_input_shapes, const_data), NodeValidationFailure, @@ -130,7 +130,7 @@ TEST(StaticShapeInferenceTest, RMSNormIncorrectScaleShape) { std::vector static_input_shapes = {StaticShape{2, 3, 8, 6}, StaticShape{1}, StaticShape{6, 1}}; int32_t axis_val = -1; - const auto const_data = std::unordered_map{{1, {element::i32, Shape{1}, &axis_val}}}; + const auto const_data = std::unordered_map{{1, {element::i32, ov::Shape{1}, &axis_val}}}; OV_EXPECT_THROW(shape_inference(op.get(), static_input_shapes, const_data), NodeValidationFailure, diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/rnn_cell_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/rnn_cell_shape_inference_test.cpp index 5b81c2307e3f2b..d2bb359b6344e3 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/rnn_cell_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/rnn_cell_shape_inference_test.cpp @@ -9,12 +9,7 @@ using namespace ov; using namespace ov::intel_cpu; -class RNNCellV0StaticShapeInferenceTest : public OpStaticShapeInferenceTest { -protected: - void SetUp() override { - this->output_shapes = ShapeVector(1); - } -}; +class RNNCellV0StaticShapeInferenceTest : public OpStaticShapeInferenceTest {}; TEST_F(RNNCellV0StaticShapeInferenceTest, default_ctor) { constexpr size_t batch_size = 2; diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/rnn_seq_shape_inference_test.cpp 
b/src/plugins/intel_cpu/tests/unit/shape_inference_test/rnn_seq_shape_inference_test.cpp index 304dd8a6311920..5e65589661bb9a 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/rnn_seq_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/rnn_seq_shape_inference_test.cpp @@ -9,12 +9,7 @@ using namespace ov; using namespace ov::intel_cpu; -class RNNSequenceV5StaticShapeInferenceTest : public OpStaticShapeInferenceTest { -protected: - void SetUp() override { - this->output_shapes = ShapeVector(1); - } -}; +class RNNSequenceV5StaticShapeInferenceTest : public OpStaticShapeInferenceTest {}; TEST_F(RNNSequenceV5StaticShapeInferenceTest, default_ctor) { constexpr size_t batch_size = 2; diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/roi_align_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/roi_align_shape_inference_test.cpp index bd99ee00c8113b..37e3dcfb7f59a9 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/roi_align_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/roi_align_shape_inference_test.cpp @@ -26,7 +26,7 @@ TYPED_TEST_P(StaticShapeROIAlignTest, default_ctor_no_args) { this->op->set_pooled_h(2); this->op->set_pooled_w(2); - this->input_shapes = ShapeVector{{2, 3, 5, 5}, {7, 4}, {7}}; + this->input_shapes = StaticShapeVector{{2, 3, 5, 5}, {7, 4}, {7}}; this->output_shapes = shape_inference(this->op.get(), this->input_shapes); EXPECT_EQ(this->output_shapes.size(), 1); @@ -40,7 +40,7 @@ TYPED_TEST_P(StaticShapeROIAlignTest, all_inputs_dynamic_rank) { this->op = this->make_op(data, rois, batch_indices, 2, 2, 2, 1.0f, TypeParam::PoolingMode::AVG); - this->input_shapes = ShapeVector{{2, 3, 5, 5}, {10, 4}, {10}}; + this->input_shapes = StaticShapeVector{{2, 3, 5, 5}, {10, 4}, {10}}; this->output_shapes = shape_inference(this->op.get(), this->input_shapes); EXPECT_EQ(this->output_shapes.size(), 1); @@ -54,7 +54,7 @@ TYPED_TEST_P(StaticShapeROIAlignTest, all_inputs_static_rank) { this->op = this->make_op(data, rois, batch_indices, 2, 2, 2, 1.0f, TypeParam::PoolingMode::AVG); - this->input_shapes = ShapeVector{{2, 8, 5, 5}, {10, 4}, {10}}; + this->input_shapes = StaticShapeVector{{2, 8, 5, 5}, {10, 4}, {10}}; this->output_shapes = shape_inference(this->op.get(), this->input_shapes); EXPECT_EQ(this->output_shapes.size(), 1); @@ -68,7 +68,7 @@ TYPED_TEST_P(StaticShapeROIAlignTest, incompatible_input_rank) { this->op = this->make_op(data, rois, batch_indices, 2, 2, 2, 1.0f, TypeParam::PoolingMode::AVG); - this->input_shapes = ShapeVector{{2, 8, 5}, {10, 3}, {10}}; + this->input_shapes = StaticShapeVector{{2, 8, 5}, {10, 3}, {10}}; OV_EXPECT_THROW(shape_inference(this->op.get(), this->input_shapes), NodeValidationFailure, HasSubstr("Expected a 4D tensor for the input data")); @@ -81,7 +81,7 @@ TYPED_TEST_P(StaticShapeROIAlignTest, incompatible_rois_rank) { this->op = this->make_op(data, rois, batch_indices, 2, 2, 2, 1.0f, TypeParam::PoolingMode::AVG); - this->input_shapes = ShapeVector{{2, 8, 5, 5}, {10, 3, 1}, {10}}; + this->input_shapes = StaticShapeVector{{2, 8, 5, 5}, {10, 3, 1}, {10}}; OV_EXPECT_THROW(shape_inference(this->op.get(), this->input_shapes), NodeValidationFailure, HasSubstr("Expected a 2D tensor for the ROIs input")); @@ -93,7 +93,7 @@ TYPED_TEST_P(StaticShapeROIAlignTest, incompatible_batch_indicies_rank) { const auto batch_indices = std::make_shared(element::i8, PartialShape::dynamic()); this->op = this->make_op(data, rois, 
batch_indices, 2, 2, 2, 1.0f, TypeParam::PoolingMode::AVG); - this->input_shapes = ShapeVector{{2, 8, 5, 5}, {10, 3}, {}}; + this->input_shapes = StaticShapeVector{{2, 8, 5, 5}, {10, 3}, {}}; OV_EXPECT_THROW(shape_inference(this->op.get(), this->input_shapes), NodeValidationFailure, HasSubstr("Expected a 1D tensor for the batch indices input.")); @@ -106,7 +106,7 @@ TYPED_TEST_P(StaticShapeROIAlignTest, invalid_rois_2nd_dim) { this->op = this->make_op(data, rois, batch_indices, 2, 2, 2, 1.0f, TypeParam::PoolingMode::AVG); - this->input_shapes = ShapeVector{{2, 8, 5, 5}, {10, 3}, {10}}; + this->input_shapes = StaticShapeVector{{2, 8, 5, 5}, {10, 3}, {10}}; OV_EXPECT_THROW(shape_inference(this->op.get(), this->input_shapes), NodeValidationFailure, HasSubstr("op dimension is expected to be equal to 4")); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/roi_pooling_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/roi_pooling_shape_inference_test.cpp index 7805c78c322fe1..ba2c53c2138ee5 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/roi_pooling_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/roi_pooling_shape_inference_test.cpp @@ -25,7 +25,7 @@ TEST_F(ROIPoolingV0StaticShapeInferenceTest, default_ctor) { op->set_method("max"); op->set_spatial_scale(0.34f); - input_shapes = ShapeVector{{1, 5, 10, 10}, {2, 5}}; + input_shapes = StaticShapeVector{{1, 5, 10, 10}, {2, 5}}; auto shape_infer = make_shape_inference(op); output_shapes = shape_inference(op.get(), input_shapes); @@ -39,7 +39,7 @@ TEST_F(ROIPoolingV0StaticShapeInferenceTest, inputs_dynamic_rank) { op = make_op(feat, rois, ov::Shape{5, 5}, 0.9f); - input_shapes = ShapeVector{{2, 3, 100, 100}, {10, 5}}; + input_shapes = StaticShapeVector{{2, 3, 100, 100}, {10, 5}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -52,7 +52,7 @@ TEST_F(ROIPoolingV0StaticShapeInferenceTest, inputs_static_rank) { op = make_op(feat, rois, ov::Shape{7, 5}, 1.9f, "max"); - input_shapes = ShapeVector{{2, 3, 20, 100}, {10, 5}}; + input_shapes = StaticShapeVector{{2, 3, 20, 100}, {10, 5}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -65,7 +65,7 @@ TEST_F(ROIPoolingV0StaticShapeInferenceTest, invalid_rois_batch_size) { op = make_op(feat, rois, ov::Shape{7, 5}, 1.9f, "max"); - input_shapes = ShapeVector{{2, 3, 20, 100}, {10, 6}}; + input_shapes = StaticShapeVector{{2, 3, 20, 100}, {10, 6}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes), NodeValidationFailure, diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/roll_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/roll_shape_inference_test.cpp index 135f8651fa5d34..857cfbbac12c0a 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/roll_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/roll_shape_inference_test.cpp @@ -24,7 +24,7 @@ class RollV7StaticShapeInferenceTest : public OpStaticShapeInferenceTest(ov::element::f32, ov::PartialShape::dynamic()); const auto shift = std::make_shared(ov::element::i64, ov::PartialShape::dynamic()); - const auto axes = Constant::create(element::i64, Shape{2}, {-2, 1}); + const auto axes = Constant::create(element::i64, ov::Shape{2}, {-2, 1}); const auto op = make_op(arg, shift, axes); @@ -43,7 +43,7 @@ TEST_F(RollV7StaticShapeInferenceTest, axes_in_const_map) { auto axes_val = 
std::array{0, 1, -1}; const auto constant_data = - std::unordered_map{{2, {element::i32, Shape{axes_val.size()}, axes_val.data()}}}; + std::unordered_map{{2, {element::i32, ov::Shape{axes_val.size()}, axes_val.data()}}}; input_shapes = {StaticShape{3, 3, 3}, StaticShape{3}, StaticShape{3}}; @@ -60,7 +60,7 @@ TEST_F(RollV7StaticShapeInferenceTest, axes_over_arg_rank) { auto axes_val = std::array{0, 3, -1}; const auto constant_data = - std::unordered_map{{2, {element::i32, Shape{axes_val.size()}, axes_val.data()}}}; + std::unordered_map{{2, {element::i32, ov::Shape{axes_val.size()}, axes_val.data()}}}; input_shapes = {StaticShape{3, 3, 3}, StaticShape{3}, StaticShape{3}}; @@ -78,7 +78,7 @@ TEST_F(RollV7StaticShapeInferenceTest, axes_has_negative_after_normalization) { auto axes_val = std::array{-4, 2, -1}; const auto constant_data = - std::unordered_map{{2, {element::i64, Shape{axes_val.size()}, axes_val.data()}}}; + std::unordered_map{{2, {element::i64, ov::Shape{axes_val.size()}, axes_val.data()}}}; input_shapes = {StaticShape{3, 3, 3}, StaticShape{3}, StaticShape{3}}; @@ -92,7 +92,7 @@ TEST_F(RollV7StaticShapeInferenceTest, default_ctor) { auto axes_val = std::array{-4, 2, -1, 1}; const auto constant_data = - std::unordered_map{{2, {element::i64, Shape{axes_val.size()}, axes_val.data()}}}; + std::unordered_map{{2, {element::i64, ov::Shape{axes_val.size()}, axes_val.data()}}}; input_shapes = {StaticShape{3, 2, 5, 1}, StaticShape{}, StaticShape{4}}; diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/scaled_dot_product_attention_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/scaled_dot_product_attention_shape_inference_test.cpp index 7ee414710c06de..8d2010a9059785 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/scaled_dot_product_attention_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/scaled_dot_product_attention_shape_inference_test.cpp @@ -18,7 +18,7 @@ class ScaledDotProductAttentionV13StaticShapeInferenceTest : public OpStaticShap TEST_F(ScaledDotProductAttentionV13StaticShapeInferenceTest, default_ctor) { op = make_op(); - input_shapes = ShapeVector{{3, 2, 3, 4}, {2, 5, 4}, {1, 5, 6}, {1, 3, 5}, {}}; + input_shapes = StaticShapeVector{{3, 2, 3, 4}, {2, 5, 4}, {1, 5, 6}, {1, 3, 5}, {}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); EXPECT_EQ(output_shapes.front(), StaticShape({3, 2, 3, 6})); @@ -33,22 +33,22 @@ TEST_F(ScaledDotProductAttentionV13StaticShapeInferenceTest, dynamic_shapes) { auto causal = false; op = make_op(query, key, value, attention_mask, scale, causal); - input_shapes = ShapeVector{{2, 3, 4}, {2, 5, 4}, {2, 5, 6}, {1, 3, 5}, {}}; + input_shapes = StaticShapeVector{{2, 3, 4}, {2, 5, 4}, {2, 5, 6}, {1, 3, 5}, {}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); EXPECT_EQ(output_shapes.front(), StaticShape({2, 3, 6})); } TEST_F(ScaledDotProductAttentionV13StaticShapeInferenceTest, static_shapes) { - const auto query = std::make_shared(element::f32, Shape{2, 3, 4}); - const auto key = std::make_shared(element::f32, Shape{2, 5, 4}); - const auto value = std::make_shared(element::f32, Shape{2, 5, 6}); - const auto attention_mask = std::make_shared(element::f32, Shape{1, 3, 5}); - const auto scale = std::make_shared(element::f32, Shape{1}); + const auto query = std::make_shared(element::f32, ov::Shape{2, 3, 4}); + const auto key = std::make_shared(element::f32, ov::Shape{2, 5, 
4}); + const auto value = std::make_shared(element::f32, ov::Shape{2, 5, 6}); + const auto attention_mask = std::make_shared(element::f32, ov::Shape{1, 3, 5}); + const auto scale = std::make_shared(element::f32, ov::Shape{1}); auto causal = false; op = make_op(query, key, value, attention_mask, scale, causal); - input_shapes = ShapeVector{{2, 3, 4}, {2, 5, 4}, {2, 5, 6}, {1, 3, 5}, {1}}; + input_shapes = StaticShapeVector{{2, 3, 4}, {2, 5, 4}, {2, 5, 6}, {1, 3, 5}, {1}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); EXPECT_EQ(output_shapes.front(), StaticShape({2, 3, 6})); @@ -57,13 +57,13 @@ TEST_F(ScaledDotProductAttentionV13StaticShapeInferenceTest, static_shapes) { TEST_F(ScaledDotProductAttentionV13StaticShapeInferenceTest, mixed_shapes) { const auto query = std::make_shared(element::f32, PartialShape{2, {2, 3}, 4}); const auto key = std::make_shared(element::f32, PartialShape{-1, {2, 7}, -1}); - const auto value = std::make_shared(element::f32, Shape{2, 5, 6}); + const auto value = std::make_shared(element::f32, ov::Shape{2, 5, 6}); const auto attention_mask = std::make_shared(element::f32, PartialShape{1, {3, 5}, 5}); - const auto scale = std::make_shared(element::f32, Shape{}); + const auto scale = std::make_shared(element::f32, ov::Shape{}); auto causal = false; op = make_op(query, key, value, attention_mask, scale, causal); - input_shapes = ShapeVector{{2, 3, 4}, {2, 5, 4}, {2, 5, 6}, {1, 3, 5}, {}}; + input_shapes = StaticShapeVector{{2, 3, 4}, {2, 5, 4}, {2, 5, 6}, {1, 3, 5}, {}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); EXPECT_EQ(output_shapes.front(), StaticShape({2, 3, 6})); @@ -79,7 +79,7 @@ TEST_F(ScaledDotProductAttentionV13StaticShapeInferenceTest, attention_L_broadca op = make_op(query, key, value, attention_mask, scale, causal); - input_shapes = ShapeVector{{2, 8, 16, 32}, {2, 8, 24, 32}, {2, 8, 24, 48}, {1, 1, 24}, {}}; + input_shapes = StaticShapeVector{{2, 8, 16, 32}, {2, 8, 24, 32}, {2, 8, 24, 48}, {1, 1, 24}, {}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); EXPECT_EQ(output_shapes.front(), StaticShape({2, 8, 16, 48})); @@ -95,7 +95,7 @@ TEST_F(ScaledDotProductAttentionV13StaticShapeInferenceTest, attention_S_broadca op = make_op(query, key, value, attention_mask, scale, causal); - input_shapes = ShapeVector{{2, 8, 16, 32}, {2, 8, 24, 32}, {2, 8, 24, 48}, {1, 16, 1}, {}}; + input_shapes = StaticShapeVector{{2, 8, 16, 32}, {2, 8, 24, 32}, {2, 8, 24, 48}, {1, 16, 1}, {}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); EXPECT_EQ(output_shapes.front(), StaticShape({2, 8, 16, 48})); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/scatter_elements_update_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/scatter_elements_update_shape_inference_test.cpp index f4d5dbf723fd20..b0dbdcc7fe3135 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/scatter_elements_update_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/scatter_elements_update_shape_inference_test.cpp @@ -25,9 +25,9 @@ TEST_F(ScatterElementsUpdateV3StaticShapeInferenceTest, default_ctor) { const auto op = make_op(); int32_t axis = 1; - const auto const_data = std::unordered_map{{3, {element::i32, Shape{1}, &axis}}}; + const auto const_data = std::unordered_map{{3, {element::i32, ov::Shape{1}, &axis}}}; - input_shapes = 
ShapeVector{{1000, 256, 10, 13}, {25, 125, 3, 1}, {25, 125, 3, 1}, {1}}; + input_shapes = StaticShapeVector{{1000, 256, 10, 13}, {25, 125, 3, 1}, {25, 125, 3, 1}, {1}}; const auto output_shapes = shape_inference(op.get(), input_shapes, const_data); EXPECT_EQ(output_shapes.size(), 1); @@ -38,11 +38,11 @@ TEST_F(ScatterElementsUpdateV3StaticShapeInferenceTest, correct_inputs_axis_as_c const auto d = std::make_shared(element::i32, PartialShape{-1, -1, -1, -1}); const auto i = std::make_shared(element::i32, PartialShape{-1, -1, -1, -1}); const auto u = std::make_shared(element::i32, PartialShape{-1, -1, -1, -1}); - const auto a = std::make_shared(element::i64, Shape{}, -2); + const auto a = std::make_shared(element::i64, ov::Shape{}, -2); const auto op = make_op(d, i, u, a); - input_shapes = ShapeVector{{2, 5, 10, 15}, {2, 1, 10, 15}, {2, 1, 10, 15}, {}}; + input_shapes = StaticShapeVector{{2, 5, 10, 15}, {2, 1, 10, 15}, {2, 1, 10, 15}, {}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -58,9 +58,9 @@ TEST_F(ScatterElementsUpdateV3StaticShapeInferenceTest, params_are_dynamic_rank_ const auto op = make_op(d, i, u, a); uint32_t axis = 2; - const auto const_data = std::unordered_map{{3, {element::u32, Shape{}, &axis}}}; + const auto const_data = std::unordered_map{{3, {element::u32, ov::Shape{}, &axis}}}; - input_shapes = ShapeVector{{5000, 256, 10, 15}, {30, 25, 3, 3}, {30, 25, 3, 3}, {}}; + input_shapes = StaticShapeVector{{5000, 256, 10, 15}, {30, 25, 3, 3}, {30, 25, 3, 3}, {}}; const auto output_shapes = shape_inference(op.get(), input_shapes, const_data); EXPECT_EQ(output_shapes.size(), 1); @@ -76,9 +76,9 @@ TEST_F(ScatterElementsUpdateV3StaticShapeInferenceTest, incorrect_axis_value) { const auto op = make_op(d, i, u, a); uint32_t axis = 4; - const auto const_data = std::unordered_map{{3, {element::u32, Shape{}, &axis}}}; + const auto const_data = std::unordered_map{{3, {element::u32, ov::Shape{}, &axis}}}; - input_shapes = ShapeVector{{5000, 256, 10, 15}, {30, 25, 3, 3}, {30, 25, 3, 3}, {}}; + input_shapes = StaticShapeVector{{5000, 256, 10, 15}, {30, 25, 3, 3}, {30, 25, 3, 3}, {}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, const_data), AssertFailure, HasSubstr("Axis 4 out of the tensor rank range [-4, 3]")); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/scatter_nd_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/scatter_nd_shape_inference_test.cpp index 135a9a85ff7a6f..d77984fff0e873 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/scatter_nd_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/scatter_nd_shape_inference_test.cpp @@ -20,7 +20,7 @@ class ScatterNDUpdateV3StaticShapeInferenceTest : public OpStaticShapeInferenceT TEST_F(ScatterNDUpdateV3StaticShapeInferenceTest, default_ctor) { const auto op = make_op(); - input_shapes = ShapeVector{{1000, 256, 10, 13}, {25, 125, 3}, {25, 125, 13}}; + input_shapes = StaticShapeVector{{1000, 256, 10, 13}, {25, 125, 3}, {25, 125, 13}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); @@ -34,7 +34,7 @@ TEST_F(ScatterNDUpdateV3StaticShapeInferenceTest, correct_inputs) { const auto op = make_op(d, i, u); - input_shapes = ShapeVector{{1000, 256, 10, 15}, {25, 125, 3}, {25, 125, 15}}; + input_shapes = StaticShapeVector{{1000, 256, 10, 15}, {25, 125, 3}, {25, 125, 15}}; output_shapes = shape_inference(op.get(), input_shapes); 
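// ---------------------------------------------------------------------------
// Editor's sketch (not part of the diff): a minimal example of the test pattern
// exercised by the ScatterElementsUpdate/ScatterNDUpdate hunks above, written
// with the updated ov::Shape / StaticShapeVector spelling. Static input shapes
// are passed as a StaticShapeVector and constant inputs (here the axis at port
// 3) are supplied through an std::unordered_map<size_t, ov::Tensor>. The
// include paths and the location of the shape_inference() test helper are
// assumptions based on the surrounding hunks.
#include <gtest/gtest.h>

#include "openvino/op/scatter_elements_update.hpp"
#include "utils.hpp"  // assumed: intel_cpu shape-inference test helpers (shape_inference, StaticShapeVector)

using namespace ov;
using namespace ov::intel_cpu;

TEST(StaticShapeInferenceSketch, scatter_elements_update_axis_in_const_map) {
    const auto op = std::make_shared<op::v3::ScatterElementsUpdate>();

    int32_t axis = 1;
    // Port 3 holds the axis input; its value is provided out-of-band via the const-data map.
    const auto const_data = std::unordered_map<size_t, ov::Tensor>{{3, {element::i32, ov::Shape{1}, &axis}}};

    const auto input_shapes = StaticShapeVector{{1000, 256, 10, 13}, {25, 125, 3, 1}, {25, 125, 3, 1}, {1}};
    const auto output_shapes = shape_inference(op.get(), input_shapes, const_data);

    EXPECT_EQ(output_shapes.size(), 1);
    // ScatterElementsUpdate preserves the shape of the data input (port 0).
    EXPECT_EQ(output_shapes.front(), StaticShape({1000, 256, 10, 13}));
}
// ---------------------------------------------------------------------------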
EXPECT_EQ(output_shapes.size(), 1); @@ -48,7 +48,7 @@ TEST_F(ScatterNDUpdateV3StaticShapeInferenceTest, params_are_dynamic_rank) { const auto op = make_op(d, i, u); - input_shapes = ShapeVector{{5000, 256, 10, 15}, {30, 25, 3}, {30, 25, 15}}; + input_shapes = StaticShapeVector{{5000, 256, 10, 15}, {30, 25, 3}, {30, 25, 15}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/scatter_update_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/scatter_update_shape_inference_test.cpp index a8cec512b71ae5..082c6087e1092c 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/scatter_update_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/scatter_update_shape_inference_test.cpp @@ -19,7 +19,7 @@ TEST(StaticShapeInferenceTest, ScatterUpdate_3D_axis_1) { int32_t axis_val[] = {1}; std::unordered_map constant_data; - constant_data[3] = ov::Tensor(element::Type_t::i32, Shape{1}, axis_val); + constant_data[3] = ov::Tensor(element::Type_t::i32, ov::Shape{1}, axis_val); std::vector input_shapes = {StaticShape{2, 3, 4}, StaticShape{2, 1}, StaticShape{2, 2, 1, 4}, @@ -39,7 +39,7 @@ TEST(StaticShapeInferenceTest, ScatterUpdate_4D_axis_2) { int32_t axis_val[] = {2}; std::unordered_map constant_data; - constant_data[3] = ov::Tensor(element::Type_t::i32, Shape{1}, axis_val); + constant_data[3] = ov::Tensor(element::Type_t::i32, ov::Shape{1}, axis_val); std::vector input_shapes = {StaticShape{1000, 256, 10, 15}, StaticShape{125, 20}, StaticShape{1000, 125, 20, 10, 15}, @@ -59,7 +59,7 @@ TEST(StaticShapeInferenceTest, ScatterUpdate_4D_incompatible_axis) { int32_t axis_val[] = {1}; std::unordered_map constant_data; - constant_data[3] = ov::Tensor(element::Type_t::i32, Shape{1}, axis_val); + constant_data[3] = ov::Tensor(element::Type_t::i32, ov::Shape{1}, axis_val); std::vector input_shapes = {StaticShape{1000, 256, 10, 15}, StaticShape{125, 20}, StaticShape{1000, 125, 20, 10, 15}, @@ -73,7 +73,7 @@ TEST(StaticShapeInferenceTest, ScatterUpdate_axis_as_const) { auto data_param = std::make_shared(element::i32, PartialShape{-1, -1, -1, -1}); auto indices_param = std::make_shared(element::i32, PartialShape{-1, -1}); auto updates_param = std::make_shared(element::i32, PartialShape{-1, -1, -1, -1, -1}); - auto axis_const = std::make_shared(element::i32, Shape{1}, std::vector{1}); + auto axis_const = std::make_shared(element::i32, ov::Shape{1}, std::vector{1}); auto scatter_update = std::make_shared(data_param, indices_param, updates_param, axis_const); @@ -95,7 +95,7 @@ TEST(StaticShapeInferenceTest, ScatterUpdate_dynamic_rank) { int32_t axis_val[] = {1}; std::unordered_map constant_data; - constant_data[3] = ov::Tensor(element::Type_t::i32, Shape{1}, axis_val); + constant_data[3] = ov::Tensor(element::Type_t::i32, ov::Shape{1}, axis_val); std::vector input_shapes = {StaticShape{1000, 256, 10, 15}, StaticShape{125, 20}, StaticShape{1000, 125, 20, 10, 15}, @@ -115,7 +115,7 @@ TEST(StaticShapeInferenceTest, ScatterUpdate_params_dynamic_rank_incorrect_updat int32_t axis_val[] = {1}; std::unordered_map constant_data; - constant_data[3] = ov::Tensor(element::Type_t::i32, Shape{1}, axis_val); + constant_data[3] = ov::Tensor(element::Type_t::i32, ov::Shape{1}, axis_val); // Incorrect rank of the third input shape std::vector input_shapes = {StaticShape{1000, 256, 10, 15}, diff --git 
a/src/plugins/intel_cpu/tests/unit/shape_inference_test/search_sorted_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/search_sorted_shape_inference_test.cpp index ac0b4763b7bf5d..90a9dacafa8324 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/search_sorted_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/search_sorted_shape_inference_test.cpp @@ -19,7 +19,7 @@ TEST_F(SearchSortedShapeInferenceTest, same_dimensions_nd_inputs) { const auto sorted = std::make_shared(element::i64, PartialShape::dynamic()); const auto values = std::make_shared(element::i64, PartialShape::dynamic()); const auto op = make_op(sorted, values); - const auto input_shapes = ShapeVector{StaticShape{1, 3, 6}, StaticShape{1, 3, 6}}; + const auto input_shapes = StaticShapeVector{StaticShape{1, 3, 6}, StaticShape{1, 3, 6}}; const auto output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); EXPECT_EQ(output_shapes.front(), StaticShape({1, 3, 6})); @@ -29,7 +29,7 @@ TEST_F(SearchSortedShapeInferenceTest, scalar_values) { const auto sorted = std::make_shared(element::i64, PartialShape::dynamic()); const auto values = std::make_shared(element::i64, PartialShape::dynamic()); const auto op = make_op(sorted, values); - const auto input_shapes = ShapeVector{StaticShape{3}, StaticShape{}}; + const auto input_shapes = StaticShapeVector{StaticShape{3}, StaticShape{}}; const auto output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); EXPECT_EQ(output_shapes.front(), StaticShape{}); @@ -39,7 +39,7 @@ TEST_F(SearchSortedShapeInferenceTest, different_last_dim) { const auto sorted = std::make_shared(element::i64, PartialShape::dynamic()); const auto values = std::make_shared(element::i64, PartialShape::dynamic()); const auto op = make_op(sorted, values); - const auto input_shapes = ShapeVector{StaticShape{1, 3, 7, 100}, StaticShape{1, 3, 7, 10}}; + const auto input_shapes = StaticShapeVector{StaticShape{1, 3, 7, 100}, StaticShape{1, 3, 7, 10}}; const auto output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); EXPECT_EQ(output_shapes.front(), StaticShape({1, 3, 7, 10})); @@ -49,7 +49,7 @@ TEST_F(SearchSortedShapeInferenceTest, 1d_inputs) { const auto sorted = std::make_shared(element::i64, PartialShape::dynamic()); const auto values = std::make_shared(element::i64, PartialShape::dynamic()); const auto op = make_op(sorted, values); - const auto input_shapes = ShapeVector{StaticShape{5}, StaticShape{20}}; + const auto input_shapes = StaticShapeVector{StaticShape{5}, StaticShape{20}}; const auto output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); EXPECT_EQ(output_shapes.front(), StaticShape({20})); @@ -59,7 +59,7 @@ TEST_F(SearchSortedShapeInferenceTest, 1d_sequence) { const auto sorted = std::make_shared(element::i64, PartialShape::dynamic()); const auto values = std::make_shared(element::i64, PartialShape::dynamic()); const auto op = make_op(sorted, values); - const auto input_shapes = ShapeVector{StaticShape{50}, StaticShape{1, 3, 7, 10}}; + const auto input_shapes = StaticShapeVector{StaticShape{50}, StaticShape{1, 3, 7, 10}}; const auto output_shapes = shape_inference(op.get(), input_shapes); EXPECT_EQ(output_shapes.size(), 1); EXPECT_EQ(output_shapes.front(), StaticShape({1, 3, 7, 10})); @@ -77,7 +77,7 @@ TEST_F(SearchSortedShapeInferenceTest, input_shapes_ranks_validation) { const auto sorted = 
std::make_shared(element::i32, PartialShape::dynamic()); const auto values = std::make_shared(element::i32, PartialShape::dynamic()); const auto op = make_op(sorted, values); - const auto input_shapes = ShapeVector{StaticShape{1, 3, 6}, StaticShape{1, 3, 6, 7}}; + const auto input_shapes = StaticShapeVector{StaticShape{1, 3, 6}, StaticShape{1, 3, 6, 7}}; OV_EXPECT_THROW(std::ignore = shape_inference(op.get(), input_shapes), NodeValidationFailure, testing::HasSubstr("the ranks of the inputs have to be compatible")); @@ -87,7 +87,7 @@ TEST_F(SearchSortedShapeInferenceTest, input_shapes_compatibility) { const auto sorted = std::make_shared(element::i32, PartialShape::dynamic()); const auto values = std::make_shared(element::i32, PartialShape::dynamic()); const auto op = make_op(sorted, values); - const auto input_shapes = ShapeVector{StaticShape{1, 3, 6}, StaticShape{1, 6, 6}}; + const auto input_shapes = StaticShapeVector{StaticShape{1, 3, 6}, StaticShape{1, 6, 6}}; OV_EXPECT_THROW(std::ignore = shape_inference(op.get(), input_shapes), NodeValidationFailure, testing::HasSubstr("All dimensions but the last one have to be compatible")); @@ -97,7 +97,7 @@ TEST_F(SearchSortedShapeInferenceTest, scalar_sorted_sequence) { const auto sorted = std::make_shared(element::i32, PartialShape::dynamic()); const auto values = std::make_shared(element::i32, PartialShape::dynamic()); const auto op = make_op(sorted, values); - const auto input_shapes = ShapeVector{StaticShape{}, StaticShape{1, 6, 6}}; + const auto input_shapes = StaticShapeVector{StaticShape{}, StaticShape{1, 6, 6}}; OV_EXPECT_THROW(std::ignore = shape_inference(op.get(), input_shapes), NodeValidationFailure, testing::HasSubstr("The sorted sequence input cannot be a scalar")); @@ -107,7 +107,7 @@ TEST_F(SearchSortedShapeInferenceTest, scalar_values_and_ND_sequence) { const auto sorted = std::make_shared(element::i32, PartialShape::dynamic()); const auto values = std::make_shared(element::i32, PartialShape::dynamic()); const auto op = make_op(sorted, values); - const auto input_shapes = ShapeVector{StaticShape{2, 3}, StaticShape{}}; + const auto input_shapes = StaticShapeVector{StaticShape{2, 3}, StaticShape{}}; OV_EXPECT_THROW(std::ignore = shape_inference(op.get(), input_shapes), NodeValidationFailure, testing::HasSubstr("the ranks of the inputs have to be compatible")); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/slice_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/slice_shape_inference_test.cpp index 3aed5f4d512909..177087fb12d98a 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/slice_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/slice_shape_inference_test.cpp @@ -24,10 +24,11 @@ class SliceStaticShapeInferenceTest : public OpStaticShapeInferenceTest(element::f32, PartialShape::dynamic()); - const auto start = op::v0::Constant::create(element::i64, Shape{5}, std::vector{100, 5, -1, INT64_MAX, 5}); + const auto start = + op::v0::Constant::create(element::i64, ov::Shape{5}, std::vector{100, 5, -1, INT64_MAX, 5}); const auto stop = - op::v0::Constant::create(element::i64, Shape{5}, std::vector{-100, INT64_MIN, -6, 5, -10}); - const auto steps = op::v0::Constant::create(element::i64, Shape{5}, {-1, -2, -1, -1, -2}); + op::v0::Constant::create(element::i64, ov::Shape{5}, std::vector{-100, INT64_MIN, -6, 5, -10}); + const auto steps = op::v0::Constant::create(element::i64, ov::Shape{5}, {-1, -2, -1, -1, -2}); const auto op = 
make_op(data, start, stop, steps); @@ -47,19 +48,19 @@ TEST_F(SliceStaticShapeInferenceTest, reverse_step_on_signle_axis_but_start_stop const auto start = std::make_shared(et, PartialShape::dynamic()); const auto stop = std::make_shared(et, PartialShape::dynamic()); const auto steps = std::make_shared(et, PartialShape::dynamic()); - const auto axes = op::v0::Constant::create(element::i64, Shape{1}, {-1}); + const auto axes = op::v0::Constant::create(element::i64, ov::Shape{1}, {-1}); auto start_buff = std::vector{100}; auto stop_buff = std::vector{2}; auto steps_buff = std::vector{-2}; - const auto start_tensor = ov::Tensor(element::i64, Shape{1}, static_cast(start_buff.data())); - const auto stop_tensor = ov::Tensor(element::i64, Shape{1}, static_cast(stop_buff.data())); - const auto steps_tensor = ov::Tensor(element::i64, Shape{1}, static_cast(steps_buff.data())); + const auto start_tensor = ov::Tensor(element::i64, ov::Shape{1}, static_cast(start_buff.data())); + const auto stop_tensor = ov::Tensor(element::i64, ov::Shape{1}, static_cast(stop_buff.data())); + const auto steps_tensor = ov::Tensor(element::i64, ov::Shape{1}, static_cast(steps_buff.data())); const auto op = make_op(data, start, stop, steps, axes); - input_shapes = ShapeVector{{3, 4, 10}, {1}, {1}, {1}, axes->get_shape()}; + input_shapes = StaticShapeVector{{3, 4, 10}, {1}, {1}, {1}, axes->get_shape()}; const std::unordered_map& constant_data = {{1, start_tensor}, {2, stop_tensor}, @@ -84,7 +85,7 @@ TEST_F(SliceStaticShapeInferenceTest, forward_step_all_data_in_const_map) { auto steps_buff = std::vector{1, 2, 1, 3, 4, 2, 2}; auto axes_buff = std::vector{0, 1, 2, 3, 4, 5, 6}; - const auto common_shape = Shape{start_buff.size()}; + const auto common_shape = ov::Shape{start_buff.size()}; const auto start_tensor = ov::Tensor(element::i64, common_shape, static_cast(start_buff.data())); const auto stop_tensor = ov::Tensor(element::i64, common_shape, static_cast(stop_buff.data())); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/space_to_batch_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/space_to_batch_shape_inference_test.cpp index 77f85dba48fe1e..9ff89294eb9237 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/space_to_batch_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/space_to_batch_shape_inference_test.cpp @@ -35,9 +35,9 @@ TEST_F(SpaceToBatchV1StaticShapeInferenceTest, default_ctor) { int32_t pads_begin_val[] = {0, 2, 0, 0, 0}; int32_t pads_end_val[] = {0, 2, 1, 0, 0}; - const auto constant_data = std::unordered_map{{1, {element::i32, Shape{5}, block_val}}, - {2, {element::i32, Shape{5}, pads_begin_val}}, - {3, {element::i32, Shape{5}, pads_end_val}}}; + const auto constant_data = std::unordered_map{{1, {element::i32, ov::Shape{5}, block_val}}, + {2, {element::i32, ov::Shape{5}, pads_begin_val}}, + {3, {element::i32, ov::Shape{5}, pads_end_val}}}; input_shapes = {{2, 32, 64, 128, 256}, {5}, {5}, {5}}; output_shapes = shape_inference(op.get(), input_shapes, constant_data); @@ -47,9 +47,10 @@ TEST_F(SpaceToBatchV1StaticShapeInferenceTest, default_ctor) { TEST_F(SpaceToBatchV1StaticShapeInferenceTest, blocks_pads_as_constants) { const auto data = std::make_shared(element::f32, PartialShape{-1, -1, -1, -1}); - const auto block_shape = std::make_shared(element::i64, Shape{4}, std::vector{1, 12, 100, 2}); - const auto pads_begin = std::make_shared(element::i64, Shape{4}, std::vector{0, 3, 38, 1}); - const auto pads_end = 
std::make_shared(element::i64, Shape{4}, std::vector{0, 5, 38, 0}); + const auto block_shape = + std::make_shared(element::i64, ov::Shape{4}, std::vector{1, 12, 100, 2}); + const auto pads_begin = std::make_shared(element::i64, ov::Shape{4}, std::vector{0, 3, 38, 1}); + const auto pads_end = std::make_shared(element::i64, ov::Shape{4}, std::vector{0, 5, 38, 0}); const auto op = make_op(data, block_shape, pads_begin, pads_end); @@ -67,9 +68,9 @@ TEST_F(SpaceToBatchV1StaticShapeInferenceTest, blocks_pads_in_constant_map) { int32_t pads_begin_val[] = {0, 2, 0, 0, 0}; int32_t pads_end_val[] = {0, 2, 1, 0, 0}; - const auto constant_data = std::unordered_map{{1, {element::i32, Shape{5}, block_val}}, - {2, {element::i32, Shape{5}, pads_begin_val}}, - {3, {element::i32, Shape{5}, pads_end_val}}}; + const auto constant_data = std::unordered_map{{1, {element::i32, ov::Shape{5}, block_val}}, + {2, {element::i32, ov::Shape{5}, pads_begin_val}}, + {3, {element::i32, ov::Shape{5}, pads_end_val}}}; input_shapes = {{2, 32, 64, 128, 256}, {5}, {5}, {5}}; output_shapes = shape_inference(op.get(), input_shapes, constant_data); @@ -88,7 +89,7 @@ TEST_F(SpaceToBatchV1StaticShapeInferenceTest, exception_missing_pads_data_in_co const auto op = make_space_to_batch_dynamic(); int32_t block_val[] = {1, 6, 5, 1, 16}; - const auto constant_data = std::unordered_map{{1, {element::i32, Shape{5}, block_val}}}; + const auto constant_data = std::unordered_map{{1, {element::i32, ov::Shape{5}, block_val}}}; input_shapes = {{2, 32, 64, 128, 256}, {5}, {5}, {5}}; diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/split_shape_inference_tests.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/split_shape_inference_tests.cpp index e694ecc99e5a9c..4d6ff932d9e9ea 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/split_shape_inference_tests.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/split_shape_inference_tests.cpp @@ -14,10 +14,10 @@ using namespace ov; using namespace ov::intel_cpu; using namespace testing; -using SplitTestParams = std::tuple; class SplitStaticShapeInferenceTest : public OpStaticShapeInferenceTest, @@ -26,7 +26,7 @@ class SplitStaticShapeInferenceTest : public OpStaticShapeInferenceTest(element::f32, input_shapes.front().get_shape()); } @@ -37,26 +37,26 @@ class SplitStaticShapeInferenceTest : public OpStaticShapeInferenceTest(element::i64, Shape{}, axis); + const auto axis_node = std::make_shared(element::i64, ov::Shape{}, axis); op = make_op(arg, axis_node, num_of_splits); output_shapes = shape_inference(op.get(), input_shapes); @@ -66,7 +66,7 @@ TEST_P(SplitStaticShapeInferenceTest, shape_inference_empty_const_map) { } TEST_P(SplitStaticShapeInferenceTest, shape_inference_with_const_map) { - const auto axis_node = std::make_shared(element::i64, Shape{}); + const auto axis_node = std::make_shared(element::i64, ov::Shape{}); op = make_op(arg, axis_node, num_of_splits); const auto axis_tensor = ov::Tensor(element::i64, ov::Shape{}, &axis); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/squeeze_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/squeeze_shape_inference_test.cpp index 5f790135780013..b9360ca4039825 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/squeeze_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/squeeze_shape_inference_test.cpp @@ -21,7 +21,7 @@ namespace v0 { class SqueezeV0StaticShapeInferenceAssertTest : public 
OpStaticShapeInferenceTest { protected: void SetUp() override { - output_shapes = ShapeVector(1); + output_shapes = StaticShapeVector(1); } }; @@ -30,7 +30,7 @@ TEST_F(SqueezeV0StaticShapeInferenceAssertTest, no_axes) { const auto axes = std::make_shared(element::i64, PartialShape{1}); const auto op = make_op(arg, axes); - input_shapes = ShapeVector{{5, 6}, axes->get_shape()}; + input_shapes = StaticShapeVector{{5, 6}, axes->get_shape()}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes), NodeValidationFailure, @@ -38,18 +38,18 @@ TEST_F(SqueezeV0StaticShapeInferenceAssertTest, no_axes) { } TEST_F(SqueezeV0StaticShapeInferenceAssertTest, parameter_static_shape_axes_no_data) { - const auto arg = std::make_shared(element::f64, Shape{2, 1, 3, 1}); - const auto axes = std::make_shared(element::i64, Shape{2}); + const auto arg = std::make_shared(element::f64, ov::Shape{2, 1, 3, 1}); + const auto axes = std::make_shared(element::i64, ov::Shape{2}); const auto op = make_op(arg, axes); - input_shapes = ShapeVector{arg->get_shape(), axes->get_shape()}; + input_shapes = StaticShapeVector{arg->get_shape(), axes->get_shape()}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes), NodeValidationFailure, HasSubstr("Check 'constant != nullptr'")); } -using TestParams = std::tuple, // Squeeze axes StaticShape // Expected shape >; @@ -61,7 +61,7 @@ class SqueezeV0StaticShapeInferenceTest : public SqueezeV0StaticShapeInferenceAs SqueezeV0StaticShapeInferenceAssertTest::SetUp(); std::tie(input_shapes, axes, exp_shape) = GetParam(); - output_shapes = ShapeVector(1); + output_shapes = StaticShapeVector(1); arg = std::make_shared(element::f32, input_shapes.front().get_shape()); } @@ -71,39 +71,41 @@ class SqueezeV0StaticShapeInferenceTest : public SqueezeV0StaticShapeInferenceAs INSTANTIATE_TEST_SUITE_P(1d_shapes, SqueezeV0StaticShapeInferenceTest, - Values(make_tuple(ShapeVector{{1}, {1}}, std::vector{-1}, StaticShape({})), - make_tuple(ShapeVector{{6}, {1}}, std::vector{-1}, StaticShape({6})), - make_tuple(ShapeVector{{1}, {1}}, std::vector{0}, StaticShape({}))), + Values(make_tuple(StaticShapeVector{{1}, {1}}, std::vector{-1}, StaticShape({})), + make_tuple(StaticShapeVector{{6}, {1}}, std::vector{-1}, StaticShape({6})), + make_tuple(StaticShapeVector{{1}, {1}}, std::vector{0}, StaticShape({}))), PrintToStringParamName()); INSTANTIATE_TEST_SUITE_P( multi_dim_shapes, SqueezeV0StaticShapeInferenceTest, - Values(make_tuple(ShapeVector{{1, 2, 3, 1}, {2}}, std::vector{0, 3}, StaticShape({2, 3})), - make_tuple(ShapeVector{{2, 1, 1, 4}, {2}}, std::vector{2, 1}, StaticShape({2, 4})), - make_tuple(ShapeVector{{2, 1, 1, 4, 1}, {2}}, std::vector{0, 1, -2, -1}, StaticShape({2, 1, 4})), - make_tuple(ShapeVector{{1, 3, 1, 2, 1}, {3}}, std::vector{0, 2, 4}, StaticShape({3, 2})), - make_tuple(ShapeVector{{1, 3, 1, 2, 1}, {3}}, std::vector{4, 2, 0}, StaticShape({3, 2})), - make_tuple(ShapeVector{{1, 3, 1, 2, 1}, {3}}, std::vector{2, 0, 4}, StaticShape({3, 2})), - make_tuple(ShapeVector{{10, 1, 0, 1, 3, 1, 1}, {4}}, - std::vector{1, -1, 3, -2}, - StaticShape({10, 0, 3})), - make_tuple(ShapeVector{{10, 1, 0, 1, 3, 1, 1}, {}}, std::vector{}, StaticShape({10, 0, 3})), - make_tuple(ShapeVector{{2, 1, 7, 8, 3}, {1}}, std::vector{1}, StaticShape({2, 7, 8, 3}))), + Values( + make_tuple(StaticShapeVector{{1, 2, 3, 1}, {2}}, std::vector{0, 3}, StaticShape({2, 3})), + make_tuple(StaticShapeVector{{2, 1, 1, 4}, {2}}, std::vector{2, 1}, StaticShape({2, 4})), + make_tuple(StaticShapeVector{{2, 1, 1, 4, 1}, {2}}, std::vector{0, 1, 
-2, -1}, StaticShape({2, 1, 4})), + make_tuple(StaticShapeVector{{1, 3, 1, 2, 1}, {3}}, std::vector{0, 2, 4}, StaticShape({3, 2})), + make_tuple(StaticShapeVector{{1, 3, 1, 2, 1}, {3}}, std::vector{4, 2, 0}, StaticShape({3, 2})), + make_tuple(StaticShapeVector{{1, 3, 1, 2, 1}, {3}}, std::vector{2, 0, 4}, StaticShape({3, 2})), + make_tuple(StaticShapeVector{{10, 1, 0, 1, 3, 1, 1}, {4}}, + std::vector{1, -1, 3, -2}, + StaticShape({10, 0, 3})), + make_tuple(StaticShapeVector{{10, 1, 0, 1, 3, 1, 1}, {}}, std::vector{}, StaticShape({10, 0, 3})), + make_tuple(StaticShapeVector{{2, 1, 7, 8, 3}, {1}}, std::vector{1}, StaticShape({2, 7, 8, 3}))), PrintToStringParamName()); INSTANTIATE_TEST_SUITE_P( multi_dim_shapes_repeated_axis, SqueezeV0StaticShapeInferenceTest, - Values(make_tuple(ShapeVector{{2, 1, 3}, {2}}, std::vector{1, 1}, StaticShape({2, 3})), - make_tuple(ShapeVector{{3, 1, 2, 1}, {3}}, std::vector{1, -1, 1}, StaticShape({3, 2})), - make_tuple(ShapeVector{{3, 1, 2, 1}, {3}}, std::vector{1, -1, 1, -1}, StaticShape({3, 2})), - make_tuple(ShapeVector{{1, 3, 1, 2, 1}, {3}}, std::vector{2, -1, 2, -1, 0}, StaticShape({3, 2})), - make_tuple(ShapeVector{{2, 6, 7, 8, 1}, {2}}, std::vector{-1, -1}, StaticShape({2, 6, 7, 8}))), + Values( + make_tuple(StaticShapeVector{{2, 1, 3}, {2}}, std::vector{1, 1}, StaticShape({2, 3})), + make_tuple(StaticShapeVector{{3, 1, 2, 1}, {3}}, std::vector{1, -1, 1}, StaticShape({3, 2})), + make_tuple(StaticShapeVector{{3, 1, 2, 1}, {3}}, std::vector{1, -1, 1, -1}, StaticShape({3, 2})), + make_tuple(StaticShapeVector{{1, 3, 1, 2, 1}, {3}}, std::vector{2, -1, 2, -1, 0}, StaticShape({3, 2})), + make_tuple(StaticShapeVector{{2, 6, 7, 8, 1}, {2}}, std::vector{-1, -1}, StaticShape({2, 6, 7, 8}))), PrintToStringParamName()); TEST_P(SqueezeV0StaticShapeInferenceTest, shape_inference_empty_const_map) { - const auto axes_node = std::make_shared(element::i64, Shape{axes.size()}, axes); + const auto axes_node = std::make_shared(element::i64, ov::Shape{axes.size()}, axes); const auto op = make_op(arg, axes_node); output_shapes = shape_inference(op.get(), input_shapes); @@ -131,7 +133,7 @@ namespace v15 { class SqueezeV15StaticShapeInferenceAssertTest : public OpStaticShapeInferenceTest { protected: void SetUp() override { - output_shapes = ShapeVector(1); + output_shapes = StaticShapeVector(1); } }; @@ -140,7 +142,7 @@ TEST_F(SqueezeV15StaticShapeInferenceAssertTest, no_axes) { const auto axes = std::make_shared(element::i64, PartialShape{1}); const auto op = make_op(arg, axes); - input_shapes = ShapeVector{{5, 6}, axes->get_shape()}; + input_shapes = StaticShapeVector{{5, 6}, axes->get_shape()}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes), NodeValidationFailure, @@ -148,18 +150,18 @@ TEST_F(SqueezeV15StaticShapeInferenceAssertTest, no_axes) { } TEST_F(SqueezeV15StaticShapeInferenceAssertTest, parameter_static_shape_axes_no_data) { - const auto arg = std::make_shared(element::f64, Shape{2, 1, 3, 1}); - const auto axes = std::make_shared(element::i64, Shape{2}); + const auto arg = std::make_shared(element::f64, ov::Shape{2, 1, 3, 1}); + const auto axes = std::make_shared(element::i64, ov::Shape{2}); const auto op = make_op(arg, axes); - input_shapes = ShapeVector{arg->get_shape(), axes->get_shape()}; + input_shapes = StaticShapeVector{arg->get_shape(), axes->get_shape()}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes), NodeValidationFailure, HasSubstr("Check 'constant != nullptr'")); } -using TestParams = std::tuple, // Squeeze axes StaticShape // Expected 
shape >; @@ -171,7 +173,7 @@ class SqueezeV15StaticShapeInferenceTest : public SqueezeV15StaticShapeInference SqueezeV15StaticShapeInferenceAssertTest::SetUp(); std::tie(input_shapes, axes, exp_shape) = GetParam(); - output_shapes = ShapeVector(1); + output_shapes = StaticShapeVector(1); arg = std::make_shared(element::f32, input_shapes.front().get_shape()); } @@ -181,39 +183,41 @@ class SqueezeV15StaticShapeInferenceTest : public SqueezeV15StaticShapeInference INSTANTIATE_TEST_SUITE_P(1d_shapes, SqueezeV15StaticShapeInferenceTest, - Values(make_tuple(ShapeVector{{1}, {1}}, std::vector{-1}, StaticShape({})), - make_tuple(ShapeVector{{6}, {1}}, std::vector{-1}, StaticShape({6})), - make_tuple(ShapeVector{{1}, {1}}, std::vector{0}, StaticShape({}))), + Values(make_tuple(StaticShapeVector{{1}, {1}}, std::vector{-1}, StaticShape({})), + make_tuple(StaticShapeVector{{6}, {1}}, std::vector{-1}, StaticShape({6})), + make_tuple(StaticShapeVector{{1}, {1}}, std::vector{0}, StaticShape({}))), PrintToStringParamName()); INSTANTIATE_TEST_SUITE_P( multi_dim_shapes, SqueezeV15StaticShapeInferenceTest, - Values(make_tuple(ShapeVector{{1, 2, 3, 1}, {2}}, std::vector{0, 3}, StaticShape({2, 3})), - make_tuple(ShapeVector{{2, 1, 1, 4}, {2}}, std::vector{2, 1}, StaticShape({2, 4})), - make_tuple(ShapeVector{{2, 1, 1, 4, 1}, {2}}, std::vector{0, 1, -2, -1}, StaticShape({2, 1, 4})), - make_tuple(ShapeVector{{1, 3, 1, 2, 1}, {3}}, std::vector{0, 2, 4}, StaticShape({3, 2})), - make_tuple(ShapeVector{{1, 3, 1, 2, 1}, {3}}, std::vector{4, 2, 0}, StaticShape({3, 2})), - make_tuple(ShapeVector{{1, 3, 1, 2, 1}, {3}}, std::vector{2, 0, 4}, StaticShape({3, 2})), - make_tuple(ShapeVector{{10, 1, 0, 1, 3, 1, 1}, {4}}, - std::vector{1, -1, 3, -2}, - StaticShape({10, 0, 3})), - make_tuple(ShapeVector{{10, 1, 0, 1, 3, 1, 1}, {}}, std::vector{}, StaticShape({10, 0, 3})), - make_tuple(ShapeVector{{2, 1, 7, 8, 3}, {1}}, std::vector{1}, StaticShape({2, 7, 8, 3}))), + Values( + make_tuple(StaticShapeVector{{1, 2, 3, 1}, {2}}, std::vector{0, 3}, StaticShape({2, 3})), + make_tuple(StaticShapeVector{{2, 1, 1, 4}, {2}}, std::vector{2, 1}, StaticShape({2, 4})), + make_tuple(StaticShapeVector{{2, 1, 1, 4, 1}, {2}}, std::vector{0, 1, -2, -1}, StaticShape({2, 1, 4})), + make_tuple(StaticShapeVector{{1, 3, 1, 2, 1}, {3}}, std::vector{0, 2, 4}, StaticShape({3, 2})), + make_tuple(StaticShapeVector{{1, 3, 1, 2, 1}, {3}}, std::vector{4, 2, 0}, StaticShape({3, 2})), + make_tuple(StaticShapeVector{{1, 3, 1, 2, 1}, {3}}, std::vector{2, 0, 4}, StaticShape({3, 2})), + make_tuple(StaticShapeVector{{10, 1, 0, 1, 3, 1, 1}, {4}}, + std::vector{1, -1, 3, -2}, + StaticShape({10, 0, 3})), + make_tuple(StaticShapeVector{{10, 1, 0, 1, 3, 1, 1}, {}}, std::vector{}, StaticShape({10, 0, 3})), + make_tuple(StaticShapeVector{{2, 1, 7, 8, 3}, {1}}, std::vector{1}, StaticShape({2, 7, 8, 3}))), PrintToStringParamName()); INSTANTIATE_TEST_SUITE_P( multi_dim_shapes_repeated_axis, SqueezeV15StaticShapeInferenceTest, - Values(make_tuple(ShapeVector{{2, 1, 3}, {2}}, std::vector{1, 1}, StaticShape({2, 3})), - make_tuple(ShapeVector{{3, 1, 2, 1}, {3}}, std::vector{1, -1, 1}, StaticShape({3, 2})), - make_tuple(ShapeVector{{3, 1, 2, 1}, {3}}, std::vector{1, -1, 1, -1}, StaticShape({3, 2})), - make_tuple(ShapeVector{{1, 3, 1, 2, 1}, {3}}, std::vector{2, -1, 2, -1, 0}, StaticShape({3, 2})), - make_tuple(ShapeVector{{2, 6, 7, 8, 1}, {2}}, std::vector{-1, -1}, StaticShape({2, 6, 7, 8}))), + Values( + make_tuple(StaticShapeVector{{2, 1, 3}, {2}}, std::vector{1, 1}, 
StaticShape({2, 3})), + make_tuple(StaticShapeVector{{3, 1, 2, 1}, {3}}, std::vector{1, -1, 1}, StaticShape({3, 2})), + make_tuple(StaticShapeVector{{3, 1, 2, 1}, {3}}, std::vector{1, -1, 1, -1}, StaticShape({3, 2})), + make_tuple(StaticShapeVector{{1, 3, 1, 2, 1}, {3}}, std::vector{2, -1, 2, -1, 0}, StaticShape({3, 2})), + make_tuple(StaticShapeVector{{2, 6, 7, 8, 1}, {2}}, std::vector{-1, -1}, StaticShape({2, 6, 7, 8}))), PrintToStringParamName()); TEST_P(SqueezeV15StaticShapeInferenceTest, shape_inference_empty_const_map) { - const auto axes_node = std::make_shared(element::i64, Shape{axes.size()}, axes); + const auto axes_node = std::make_shared(element::i64, ov::Shape{axes.size()}, axes); const auto op = make_op(arg, axes_node); output_shapes = shape_inference(op.get(), input_shapes); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/stft_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/stft_shape_inference_test.cpp index 9d2960e31b71f2..278781e67ecccb 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/stft_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/stft_shape_inference_test.cpp @@ -30,8 +30,8 @@ TEST_F(STFTShapeInferenceTest, all_input_as_params_1D_signal) { int32_t frame_size = 16; int32_t frame_step = 16; - auto const_data = std::unordered_map{{2, {element::i32, Shape{}, &frame_size}}, - {3, {element::i32, Shape{}, &frame_step}}}; + auto const_data = std::unordered_map{{2, {element::i32, ov::Shape{}, &frame_size}}, + {3, {element::i32, ov::Shape{}, &frame_step}}}; auto acc = make_tensor_accessor(const_data); auto static_output_shapes = shape_infer(op.get(), static_input_shapes, acc); ASSERT_EQ(static_output_shapes[0], StaticShape({9, 3, 2})); @@ -50,8 +50,8 @@ TEST_F(STFTShapeInferenceTest, all_input_as_params) { int32_t frame_size = 16; int32_t frame_step = 16; - auto const_data = std::unordered_map{{2, {element::i32, Shape{}, &frame_size}}, - {3, {element::i32, Shape{}, &frame_step}}}; + auto const_data = std::unordered_map{{2, {element::i32, ov::Shape{}, &frame_size}}, + {3, {element::i32, ov::Shape{}, &frame_step}}}; auto acc = make_tensor_accessor(const_data); auto static_output_shapes = shape_infer(op.get(), static_input_shapes, acc); ASSERT_EQ(static_output_shapes[0], StaticShape({1, 9, 3, 2})); @@ -70,8 +70,8 @@ TEST_F(STFTShapeInferenceTest, all_input_as_params_equal_dims) { int32_t frame_size = 16; int32_t frame_step = 16; - auto const_data = std::unordered_map{{2, {element::i32, Shape{}, &frame_size}}, - {3, {element::i32, Shape{}, &frame_step}}}; + auto const_data = std::unordered_map{{2, {element::i32, ov::Shape{}, &frame_size}}, + {3, {element::i32, ov::Shape{}, &frame_step}}}; auto acc = make_tensor_accessor(const_data); auto static_output_shapes = shape_infer(op.get(), static_input_shapes, acc); ASSERT_EQ(static_output_shapes[0], StaticShape({1, 9, 1, 2})); @@ -111,8 +111,8 @@ TEST_F(STFTShapeInferenceTest, frame_size_incompatible_value_big) { int32_t frame_size = 49; int32_t frame_step = 16; - auto const_data = std::unordered_map{{2, {element::i32, Shape{}, &frame_size}}, - {3, {element::i32, Shape{}, &frame_step}}}; + auto const_data = std::unordered_map{{2, {element::i32, ov::Shape{}, &frame_size}}, + {3, {element::i32, ov::Shape{}, &frame_step}}}; auto acc = make_tensor_accessor(const_data); OV_EXPECT_THROW(std::ignore = shape_infer(op.get(), static_input_shapes, acc), NodeValidationFailure, @@ -133,8 +133,8 @@ TEST_F(STFTShapeInferenceTest, 
frame_size_incompatible_value_small) { int32_t frame_size = -1; int32_t frame_step = 16; - auto const_data = std::unordered_map{{2, {element::i32, Shape{}, &frame_size}}, - {3, {element::i32, Shape{}, &frame_step}}}; + auto const_data = std::unordered_map{{2, {element::i32, ov::Shape{}, &frame_size}}, + {3, {element::i32, ov::Shape{}, &frame_step}}}; auto acc = make_tensor_accessor(const_data); OV_EXPECT_THROW(std::ignore = shape_infer(op.get(), static_input_shapes, acc), NodeValidationFailure, @@ -155,8 +155,8 @@ TEST_F(STFTShapeInferenceTest, frame_step_incompatible_value) { int32_t frame_size = 16; int32_t frame_step = -1; - auto const_data = std::unordered_map{{2, {element::i32, Shape{}, &frame_size}}, - {3, {element::i32, Shape{}, &frame_step}}}; + auto const_data = std::unordered_map{{2, {element::i32, ov::Shape{}, &frame_size}}, + {3, {element::i32, ov::Shape{}, &frame_step}}}; auto acc = make_tensor_accessor(const_data); OV_EXPECT_THROW(std::ignore = shape_infer(op.get(), static_input_shapes, acc), NodeValidationFailure, @@ -177,8 +177,8 @@ TEST_F(STFTShapeInferenceTest, window_incompatible_dim_with_frame_size) { int32_t frame_size = 8; int32_t frame_step = 4; - auto const_data = std::unordered_map{{2, {element::i32, Shape{}, &frame_size}}, - {3, {element::i32, Shape{}, &frame_step}}}; + auto const_data = std::unordered_map{{2, {element::i32, ov::Shape{}, &frame_size}}, + {3, {element::i32, ov::Shape{}, &frame_step}}}; auto acc = make_tensor_accessor(const_data); OV_EXPECT_THROW(std::ignore = shape_infer(op.get(), static_input_shapes, acc), diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/strided_slice_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/strided_slice_shape_inference_test.cpp index 56f137c129b6ac..aa427c3a7db17d 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/strided_slice_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/strided_slice_shape_inference_test.cpp @@ -23,13 +23,13 @@ TEST_F(StridedSliceStaticShapeInferenceTest, reverse_stride_begin_end_clip_to_di const auto mask = std::vector(4, 0); const auto data = std::make_shared(element::f32, ov::PartialShape::dynamic()); - const auto begin = op::v0::Constant::create(element::i64, Shape{3}, {100}); - const auto end = op::v0::Constant::create(element::i64, Shape{3}, {-100}); - const auto stride = op::v0::Constant::create(element::i64, Shape{3}, {-1}); + const auto begin = op::v0::Constant::create(element::i64, ov::Shape{3}, {100}); + const auto end = op::v0::Constant::create(element::i64, ov::Shape{3}, {-100}); + const auto stride = op::v0::Constant::create(element::i64, ov::Shape{3}, {-1}); const auto op = make_op(data, begin, end, stride, mask, mask); - input_shapes = ShapeVector{{3, 4, 5}, {3}, {3}, {3}}; + input_shapes = StaticShapeVector{{3, 4, 5}, {3}, {3}, {3}}; output_shapes = shape_inference(op.get(), input_shapes); EXPECT_THAT(output_shapes, ElementsAre(StaticShape{3, 4, 5})); } @@ -38,9 +38,9 @@ TEST_F(StridedSliceStaticShapeInferenceTest, use_begin_end_variant_1) { const auto mask = std::vector(4, 0); const auto data = std::make_shared(element::f32, ov::PartialShape::dynamic()); - const auto begin = std::make_shared(element::i64, Shape{3}); - const auto end = std::make_shared(element::i64, Shape{3}); - const auto stride = std::make_shared(element::i64, Shape{3}); + const auto begin = std::make_shared(element::i64, ov::Shape{3}); + const auto end = std::make_shared(element::i64, ov::Shape{3}); + const auto 
stride = std::make_shared(element::i64, ov::Shape{3}); const auto op = make_op(data, begin, end, stride, mask, mask); @@ -50,7 +50,7 @@ TEST_F(StridedSliceStaticShapeInferenceTest, use_begin_end_variant_1) { const auto const_data = std::unordered_map{{1, {element::i64, ov::Shape{3}, begin_v}}, {2, {element::i64, ov::Shape{3}, end_v}}, {3, {element::i64, ov::Shape{3}, stride_v}}}; - input_shapes = ShapeVector{{3, 2, 3}, {3}, {3}, {3}}; + input_shapes = StaticShapeVector{{3, 2, 3}, {3}, {3}, {3}}; output_shapes = shape_inference(op.get(), input_shapes, const_data); EXPECT_THAT(output_shapes, ElementsAre(StaticShape{1, 1, 3})); @@ -60,9 +60,9 @@ TEST_F(StridedSliceStaticShapeInferenceTest, use_begin_end_variant_2) { const auto mask = std::vector(4, 0); const auto data = std::make_shared(element::f32, ov::PartialShape::dynamic()); - const auto begin = std::make_shared(element::i64, Shape{3}); - const auto end = std::make_shared(element::i64, Shape{3}); - const auto stride = std::make_shared(element::i64, Shape{3}); + const auto begin = std::make_shared(element::i64, ov::Shape{3}); + const auto end = std::make_shared(element::i64, ov::Shape{3}); + const auto stride = std::make_shared(element::i64, ov::Shape{3}); const auto op = make_op(data, begin, end, stride, mask, mask); @@ -72,7 +72,7 @@ TEST_F(StridedSliceStaticShapeInferenceTest, use_begin_end_variant_2) { const auto const_data = std::unordered_map{{1, {element::i64, ov::Shape{3}, begin_v}}, {2, {element::i64, ov::Shape{3}, end_v}}, {3, {element::i64, ov::Shape{3}, stride_v}}}; - input_shapes = ShapeVector{{3, 2, 3}, {3}, {3}, {3}}; + input_shapes = StaticShapeVector{{3, 2, 3}, {3}, {3}, {3}}; output_shapes = shape_inference(op.get(), input_shapes, const_data); EXPECT_THAT(output_shapes, ElementsAre(StaticShape{1, 2, 3})); @@ -82,9 +82,9 @@ TEST_F(StridedSliceStaticShapeInferenceTest, use_begin_end_variant_3) { const auto mask = std::vector(4, 0); const auto data = std::make_shared(element::f32, ov::PartialShape::dynamic()); - const auto begin = std::make_shared(element::i64, Shape{3}); - const auto end = std::make_shared(element::i64, Shape{3}); - const auto stride = std::make_shared(element::i64, Shape{3}); + const auto begin = std::make_shared(element::i64, ov::Shape{3}); + const auto end = std::make_shared(element::i64, ov::Shape{3}); + const auto stride = std::make_shared(element::i64, ov::Shape{3}); const auto op = make_op(data, begin, end, stride, mask, mask); @@ -94,7 +94,7 @@ TEST_F(StridedSliceStaticShapeInferenceTest, use_begin_end_variant_3) { const auto const_data = std::unordered_map{{1, {element::i64, ov::Shape{3}, begin_v}}, {2, {element::i64, ov::Shape{3}, end_v}}, {3, {element::i64, ov::Shape{3}, stride_v}}}; - input_shapes = ShapeVector{{3, 2, 3}, {3}, {3}, {3}}; + input_shapes = StaticShapeVector{{3, 2, 3}, {3}, {3}, {3}}; output_shapes = shape_inference(op.get(), input_shapes, const_data); EXPECT_THAT(output_shapes, ElementsAre(StaticShape{1, 2, 2})); @@ -105,9 +105,9 @@ TEST_F(StridedSliceStaticShapeInferenceTest, ignore_begin_end) { const auto end_mask = std::vector(3, 1); const auto data = std::make_shared(element::f32, ov::PartialShape::dynamic()); - const auto begin = std::make_shared(element::i64, Shape{3}); - const auto end = std::make_shared(element::i64, Shape{3}); - const auto stride = std::make_shared(element::i64, Shape{3}); + const auto begin = std::make_shared(element::i64, ov::Shape{3}); + const auto end = std::make_shared(element::i64, ov::Shape{3}); + const auto stride = std::make_shared(element::i64, 
ov::Shape{3}); const auto op = make_op(data, begin, end, stride, begin_mask, end_mask); @@ -117,7 +117,7 @@ TEST_F(StridedSliceStaticShapeInferenceTest, ignore_begin_end) { const auto const_data = std::unordered_map{{1, {element::i64, ov::Shape{3}, begin_v}}, {2, {element::i64, ov::Shape{3}, end_v}}, {3, {element::i64, ov::Shape{3}, stride_v}}}; - input_shapes = ShapeVector{{3, 2, 3}, {3}, {3}, {3}}; + input_shapes = StaticShapeVector{{3, 2, 3}, {3}, {3}, {3}}; output_shapes = shape_inference(op.get(), input_shapes, const_data); EXPECT_THAT(output_shapes, ElementsAre(StaticShape{2, 2, 3})); @@ -128,9 +128,9 @@ TEST_F(StridedSliceStaticShapeInferenceTest, ignore_begin_end_stride_by_two_last const auto end_mask = std::vector{0, 1, 1}; const auto data = std::make_shared(element::f32, ov::PartialShape::dynamic()); - const auto begin = std::make_shared(element::i64, Shape{3}); - const auto end = std::make_shared(element::i64, Shape{3}); - const auto stride = std::make_shared(element::i64, Shape{3}); + const auto begin = std::make_shared(element::i64, ov::Shape{3}); + const auto end = std::make_shared(element::i64, ov::Shape{3}); + const auto stride = std::make_shared(element::i64, ov::Shape{3}); auto op = make_op(data, begin, end, stride, begin_mask, end_mask); @@ -140,7 +140,7 @@ TEST_F(StridedSliceStaticShapeInferenceTest, ignore_begin_end_stride_by_two_last const auto const_data = std::unordered_map{{1, {element::i64, ov::Shape{3}, begin_v}}, {2, {element::i64, ov::Shape{3}, end_v}}, {3, {element::i64, ov::Shape{3}, stride_v}}}; - input_shapes = ShapeVector{{3, 2, 3}, {3}, {3}, {3}}; + input_shapes = StaticShapeVector{{3, 2, 3}, {3}, {3}, {3}}; output_shapes = shape_inference(op.get(), input_shapes, const_data); EXPECT_THAT(output_shapes, ElementsAre(StaticShape{2, 1, 2})); @@ -150,9 +150,9 @@ TEST_F(StridedSliceStaticShapeInferenceTest, use_reverse_stride_on_last_dimensio const auto mask = std::vector{0, 1, 1}; const auto data = std::make_shared(element::f32, ov::PartialShape::dynamic()); - const auto begin = std::make_shared(element::i64, Shape{3}); - const auto end = std::make_shared(element::i64, Shape{3}); - const auto stride = std::make_shared(element::i64, Shape{3}); + const auto begin = std::make_shared(element::i64, ov::Shape{3}); + const auto end = std::make_shared(element::i64, ov::Shape{3}); + const auto stride = std::make_shared(element::i64, ov::Shape{3}); const auto op = make_op(data, begin, end, stride, mask, mask); @@ -162,7 +162,7 @@ TEST_F(StridedSliceStaticShapeInferenceTest, use_reverse_stride_on_last_dimensio const auto const_data = std::unordered_map{{1, {element::i64, ov::Shape{3}, begin_v}}, {2, {element::i64, ov::Shape{3}, end_v}}, {3, {element::i64, ov::Shape{3}, stride_v}}}; - input_shapes = ShapeVector{{3, 2, 3}, {3}, {3}, {3}}; + input_shapes = StaticShapeVector{{3, 2, 3}, {3}, {3}, {3}}; output_shapes = shape_inference(op.get(), input_shapes, const_data); EXPECT_THAT(output_shapes, ElementsAre(StaticShape{1, 2, 3})); @@ -176,7 +176,7 @@ TEST_F(StridedSliceStaticShapeInferenceTest, default_stride) { const auto end = op::v0::Constant::create(element::i64, ov::Shape{3}, {1, 0, 2}); const auto op = make_op(data, begin, end, mask, mask); - input_shapes = ShapeVector{{3, 2, 3}, {3}, {3}}; + input_shapes = StaticShapeVector{{3, 2, 3}, {3}, {3}}; output_shapes = shape_inference(op.get(), input_shapes); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/string_tensor_pack_shape_inference_test.cpp 
b/src/plugins/intel_cpu/tests/unit/shape_inference_test/string_tensor_pack_shape_inference_test.cpp index 3379651e11f235..e4975f27e00012 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/string_tensor_pack_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/string_tensor_pack_shape_inference_test.cpp @@ -13,12 +13,12 @@ using namespace ov::intel_cpu; using ov::op::v0::Constant; using ov::op::v0::Parameter; -class StringTensorPackStaticTestSuite : public ::testing::TestWithParam, // begins - std::vector, // ends - std::vector // symbols - >> {}; +class StringTensorPackStaticTestSuite + : public ::testing::TestWithParam, // begins + std::vector, // ends + std::vector // symbols + >> {}; TEST_P(StringTensorPackStaticTestSuite, StringTensorPackStaticShapeInference) { const auto& param = GetParam(); @@ -29,9 +29,9 @@ TEST_P(StringTensorPackStaticTestSuite, StringTensorPackStaticShapeInference) { const auto begins = std::make_shared(element::i32, indices_shape, begins_param); const auto ends = std::make_shared(element::i32, indices_shape, ends_param); - const auto symbols = std::make_shared(element::u8, Shape{symbols_param.size()}, symbols_param); + const auto symbols = std::make_shared(element::u8, ov::Shape{symbols_param.size()}, symbols_param); - const auto input_shapes = ShapeVector{indices_shape, indices_shape, Shape{symbols_param.size()}}; + const auto input_shapes = StaticShapeVector{indices_shape, indices_shape, ov::Shape{symbols_param.size()}}; const auto input_shape_refs = make_static_shape_refs(input_shapes); const auto op = std::make_shared(begins, ends, symbols); auto shape_infer = make_shape_inference(op); @@ -46,56 +46,40 @@ INSTANTIATE_TEST_SUITE_P( StringTensorPackStaticTestSuite, ::testing::Values( // "Intel" - std::make_tuple( - Shape{1}, - std::vector{0}, - std::vector{5}, - std::vector{0x49, 0x6e, 0x74, 0x65, 0x6c}), + std::make_tuple(ov::Shape{1}, + std::vector{0}, + std::vector{5}, + std::vector{0x49, 0x6e, 0x74, 0x65, 0x6c}), // "Intel", "OpenVINO" std::make_tuple( - Shape{2}, + ov::Shape{2}, std::vector{0, 5}, std::vector{5, 13}, - std::vector{0x49, 0x6e, 0x74, 0x65, 0x6c, 0x4f, - 0x70, 0x65, 0x6e, 0x56, 0x49, 0x4e, 0x4f}), + std::vector{0x49, 0x6e, 0x74, 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x6e, 0x56, 0x49, 0x4e, 0x4f}), // " " - std::make_tuple( - Shape{1}, - std::vector{0}, - std::vector{0}, - std::vector{0x20}), + std::make_tuple(ov::Shape{1}, std::vector{0}, std::vector{0}, std::vector{0x20}), // "" - std::make_tuple( - Shape{0}, - std::vector{}, - std::vector{}, - std::vector{}), + std::make_tuple(ov::Shape{0}, std::vector{}, std::vector{}, std::vector{}), // (2, 2) shape; "1", "2", "3", "4" - std::make_tuple( - Shape{2, 2}, - std::vector{0, 1, 2, 3}, - std::vector{1, 2, 3, 4}, - std::vector{0x31, 0x32, 0x33, 0x34}), + std::make_tuple(ov::Shape{2, 2}, + std::vector{0, 1, 2, 3}, + std::vector{1, 2, 3, 4}, + std::vector{0x31, 0x32, 0x33, 0x34}), // (1, 2) shape; "1", "2" - std::make_tuple( - Shape{1, 2}, - std::vector{0, 1}, - std::vector{1, 2}, - std::vector{0x31, 0x32}), + std::make_tuple(ov::Shape{1, 2}, + std::vector{0, 1}, + std::vector{1, 2}, + std::vector{0x31, 0x32}), // skipped symbols; "1", "9" - std::make_tuple( - Shape{2}, - std::vector{0, 8}, - std::vector{1, 9}, - std::vector{0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x3}), + std::make_tuple(ov::Shape{2}, + std::vector{0, 8}, + std::vector{1, 9}, + std::vector{0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x3}), // mixed strings; "1", "", " ", "4" - 
std::make_tuple( - Shape{2, 2}, - std::vector{0, 1, 1, 2}, - std::vector{1, 1, 2, 3}, - std::vector{0x31, 0x20, 0x34}) - ) -); + std::make_tuple(ov::Shape{2, 2}, + std::vector{0, 1, 1, 2}, + std::vector{1, 1, 2, 3}, + std::vector{0x31, 0x20, 0x34}))); class StringTensorPackStaticShapeInferenceWithTensorAccessorTest: public OpStaticShapeInferenceTest {}; @@ -107,12 +91,11 @@ TEST_F(StringTensorPackStaticShapeInferenceWithTensorAccessorTest, data_from_ten int32_t begins[] = {0}; int32_t ends[] = {5}; uint8_t symbols[] = {0x49, 0x6e, 0x74, 0x65, 0x6c}; - const auto const_inputs = std::unordered_map{ - {0, {element::i32, Shape{1}, begins}}, - {1, {element::i32, Shape{1}, ends}}, - {2, {element::u8, Shape{5}, symbols}}}; + const auto const_inputs = std::unordered_map{{0, {element::i32, ov::Shape{1}, begins}}, + {1, {element::i32, ov::Shape{1}, ends}}, + {2, {element::u8, ov::Shape{5}, symbols}}}; - const auto input_shapes = ShapeVector{Shape{1}, Shape{1}, Shape{5}}; + const auto input_shapes = StaticShapeVector{{1}, {1}, {5}}; auto shape_infer = make_shape_inference(op); const auto input_shape_refs = make_static_shape_refs(input_shapes); const auto output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor(const_inputs)); @@ -128,12 +111,11 @@ TEST_F(StringTensorPackStaticShapeInferenceWithTensorAccessorTest, data_from_ten int32_t begins[] = {0, 1, 2, 3}; int32_t ends[] = {1, 2, 3, 4}; uint8_t symbols[] = {0x31, 0x32, 0x33, 0x34}; - const auto const_inputs = std::unordered_map{ - {0, {element::i32, Shape{2, 2}, begins}}, - {1, {element::i32, Shape{2, 2}, ends}}, - {2, {element::u8, Shape{4}, symbols}}}; + const auto const_inputs = std::unordered_map{{0, {element::i32, ov::Shape{2, 2}, begins}}, + {1, {element::i32, ov::Shape{2, 2}, ends}}, + {2, {element::u8, ov::Shape{4}, symbols}}}; - const auto input_shapes = ShapeVector{Shape{2, 2}, Shape{2, 2}, Shape{4}}; + const auto input_shapes = StaticShapeVector{{2, 2}, {2, 2}, {4}}; auto shape_infer = make_shape_inference(op); const auto input_shape_refs = make_static_shape_refs(input_shapes); const auto output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor(const_inputs)); @@ -149,12 +131,11 @@ TEST_F(StringTensorPackStaticShapeInferenceWithTensorAccessorTest, data_from_ten int32_t begins[] = {0, 1}; int32_t ends[] = {1, 2}; uint8_t symbols[] = {0x31, 0x32}; - const auto const_inputs = std::unordered_map{ - {0, {element::i32, Shape{1, 2}, begins}}, - {1, {element::i32, Shape{1, 2}, ends}}, - {2, {element::u8, Shape{2}, symbols}}}; + const auto const_inputs = std::unordered_map{{0, {element::i32, ov::Shape{1, 2}, begins}}, + {1, {element::i32, ov::Shape{1, 2}, ends}}, + {2, {element::u8, ov::Shape{2}, symbols}}}; - const auto input_shapes = ShapeVector{Shape{1, 2}, Shape{1, 2}, Shape{2}}; + const auto input_shapes = StaticShapeVector{{1, 2}, {1, 2}, {2}}; auto shape_infer = make_shape_inference(op); const auto input_shape_refs = make_static_shape_refs(input_shapes); const auto output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor(const_inputs)); @@ -170,12 +151,11 @@ TEST_F(StringTensorPackStaticShapeInferenceWithTensorAccessorTest, data_from_ten int32_t begins[] = {0, 8}; int32_t ends[] = {1, 9}; uint8_t symbols[] = {0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x3}; - const auto const_inputs = std::unordered_map{ - {0, {element::i32, Shape{2}, begins}}, - {1, {element::i32, Shape{2}, ends}}, - {2, {element::u8, Shape{9}, symbols}}}; + const auto const_inputs = std::unordered_map{{0, 
{element::i32, ov::Shape{2}, begins}}, + {1, {element::i32, ov::Shape{2}, ends}}, + {2, {element::u8, ov::Shape{9}, symbols}}}; - const auto input_shapes = ShapeVector{Shape{2}, Shape{2}, Shape{9}}; + const auto input_shapes = StaticShapeVector{{2}, {2}, {9}}; auto shape_infer = make_shape_inference(op); const auto input_shape_refs = make_static_shape_refs(input_shapes); const auto output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor(const_inputs)); @@ -191,12 +171,11 @@ TEST_F(StringTensorPackStaticShapeInferenceWithTensorAccessorTest, data_from_ten int32_t begins[] = {0, 1, 1, 2}; int32_t ends[] = {1, 1, 2, 3}; uint8_t symbols[] = {0x31, 0x20, 0x34}; - const auto const_inputs = std::unordered_map{ - {0, {element::i32, Shape{2, 2}, begins}}, - {1, {element::i32, Shape{2, 2}, ends}}, - {2, {element::u8, Shape{3}, symbols}}}; + const auto const_inputs = std::unordered_map{{0, {element::i32, ov::Shape{2, 2}, begins}}, + {1, {element::i32, ov::Shape{2, 2}, ends}}, + {2, {element::u8, ov::Shape{3}, symbols}}}; - const auto input_shapes = ShapeVector{Shape{2, 2}, Shape{2, 2}, Shape{3}}; + const auto input_shapes = StaticShapeVector{{2, 2}, {2, 2}, {3}}; auto shape_infer = make_shape_inference(op); const auto input_shape_refs = make_static_shape_refs(input_shapes); const auto output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor(const_inputs)); @@ -210,16 +189,15 @@ TEST_F(StringTensorPackStaticShapeInferenceWithTensorAccessorTest, indices_valid const auto symbols_param = std::make_shared(element::u8, ov::PartialShape::dynamic()); const auto op = make_op(begins_param, ends_param, symbols_param); uint8_t symbols[] = {0x31, 0x20, 0x34}; - const auto input_shapes = ShapeVector{Shape{2, 2}, Shape{2, 2}, Shape{3}}; + const auto input_shapes = StaticShapeVector{{2, 2}, {2, 2}, {3}}; auto shape_infer = make_shape_inference(op); const auto input_shape_refs = make_static_shape_refs(input_shapes); { // negative begins indices int32_t begins[] = {-1, 1, 1, 2}; int32_t ends[] = {1, 1, 2, 3}; - const auto const_inputs = std::unordered_map{ - {0, {element::i32, Shape{2, 2}, begins}}, - {1, {element::i32, Shape{2, 2}, ends}}, - {2, {element::u8, Shape{3}, symbols}}}; + const auto const_inputs = std::unordered_map{{0, {element::i32, ov::Shape{2, 2}, begins}}, + {1, {element::i32, ov::Shape{2, 2}, ends}}, + {2, {element::u8, ov::Shape{3}, symbols}}}; OV_EXPECT_THROW(std::ignore = *shape_infer->infer(input_shape_refs, make_tensor_accessor(const_inputs)), NodeValidationFailure, testing::HasSubstr("Indices cannot be negative")); @@ -227,10 +205,9 @@ TEST_F(StringTensorPackStaticShapeInferenceWithTensorAccessorTest, indices_valid { // negative ends indices int32_t begins[] = {1, 1, 1, 2}; int32_t ends[] = {-1, 1, 2, 3}; - const auto const_inputs = std::unordered_map{ - {0, {element::i32, Shape{2, 2}, begins}}, - {1, {element::i32, Shape{2, 2}, ends}}, - {2, {element::u8, Shape{3}, symbols}}}; + const auto const_inputs = std::unordered_map{{0, {element::i32, ov::Shape{2, 2}, begins}}, + {1, {element::i32, ov::Shape{2, 2}, ends}}, + {2, {element::u8, ov::Shape{3}, symbols}}}; OV_EXPECT_THROW(std::ignore = *shape_infer->infer(input_shape_refs, make_tensor_accessor(const_inputs)), NodeValidationFailure, testing::HasSubstr("Indices cannot be negative")); @@ -238,10 +215,9 @@ TEST_F(StringTensorPackStaticShapeInferenceWithTensorAccessorTest, indices_valid { // begins out of bounds int32_t begins[] = {1, 1, 1, 4}; int32_t ends[] = {1, 1, 2, 3}; - const auto const_inputs = 
std::unordered_map{ - {0, {element::i32, Shape{2, 2}, begins}}, - {1, {element::i32, Shape{2, 2}, ends}}, - {2, {element::u8, Shape{3}, symbols}}}; + const auto const_inputs = std::unordered_map{{0, {element::i32, ov::Shape{2, 2}, begins}}, + {1, {element::i32, ov::Shape{2, 2}, ends}}, + {2, {element::u8, ov::Shape{3}, symbols}}}; OV_EXPECT_THROW(std::ignore = *shape_infer->infer(input_shape_refs, make_tensor_accessor(const_inputs)), NodeValidationFailure, testing::HasSubstr("The biggest index cannot be higher than the amount or characters in symbols input")); @@ -249,10 +225,9 @@ TEST_F(StringTensorPackStaticShapeInferenceWithTensorAccessorTest, indices_valid { // ends out of bounds int32_t begins[] = {1, 1, 1, 3}; int32_t ends[] = {1, 1, 2, 4}; - const auto const_inputs = std::unordered_map{ - {0, {element::i32, Shape{2, 2}, begins}}, - {1, {element::i32, Shape{2, 2}, ends}}, - {2, {element::u8, Shape{3}, symbols}}}; + const auto const_inputs = std::unordered_map{{0, {element::i32, ov::Shape{2, 2}, begins}}, + {1, {element::i32, ov::Shape{2, 2}, ends}}, + {2, {element::u8, ov::Shape{3}, symbols}}}; OV_EXPECT_THROW(std::ignore = *shape_infer->infer(input_shape_refs, make_tensor_accessor(const_inputs)), NodeValidationFailure, testing::HasSubstr("The biggest index cannot be higher than the amount or characters in symbols input")); @@ -260,10 +235,9 @@ TEST_F(StringTensorPackStaticShapeInferenceWithTensorAccessorTest, indices_valid { // unsorted begins int32_t begins[] = {1, 3, 1, 2}; int32_t ends[] = {1, 1, 2, 3}; - const auto const_inputs = std::unordered_map{ - {0, {element::i32, Shape{2, 2}, begins}}, - {1, {element::i32, Shape{2, 2}, ends}}, - {2, {element::u8, Shape{3}, symbols}}}; + const auto const_inputs = std::unordered_map{{0, {element::i32, ov::Shape{2, 2}, begins}}, + {1, {element::i32, ov::Shape{2, 2}, ends}}, + {2, {element::u8, ov::Shape{3}, symbols}}}; OV_EXPECT_THROW(std::ignore = *shape_infer->infer(input_shape_refs, make_tensor_accessor(const_inputs)), NodeValidationFailure, testing::HasSubstr("Indices must be in ascending order")); @@ -271,10 +245,9 @@ TEST_F(StringTensorPackStaticShapeInferenceWithTensorAccessorTest, indices_valid { // unsorted ends int32_t begins[] = {1, 1, 1, 2}; int32_t ends[] = {1, 1, 5, 3}; - const auto const_inputs = std::unordered_map{ - {0, {element::i32, Shape{2, 2}, begins}}, - {1, {element::i32, Shape{2, 2}, ends}}, - {2, {element::u8, Shape{3}, symbols}}}; + const auto const_inputs = std::unordered_map{{0, {element::i32, ov::Shape{2, 2}, begins}}, + {1, {element::i32, ov::Shape{2, 2}, ends}}, + {2, {element::u8, ov::Shape{3}, symbols}}}; OV_EXPECT_THROW(std::ignore = *shape_infer->infer(input_shape_refs, make_tensor_accessor(const_inputs)), NodeValidationFailure, testing::HasSubstr("Indices must be in ascending order")); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/string_tensor_unpack_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/string_tensor_unpack_shape_inference_test.cpp index 8fdf3de04131df..34f35b851b674b 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/string_tensor_unpack_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/string_tensor_unpack_shape_inference_test.cpp @@ -21,10 +21,10 @@ static size_t get_character_count(const std::vector& vec) { return count; } -class StringTensorUnpackStaticTestSuite : public ::testing::TestWithParam, // input data - Shape // input shape - >> {}; +class StringTensorUnpackStaticTestSuite + : 
public ::testing::TestWithParam, // input data + ov::Shape // input shape + >> {}; class StringTensorUnpackStaticShapeInferenceTest: public OpStaticShapeInferenceTest {}; @@ -32,9 +32,9 @@ TEST_F(StringTensorUnpackStaticShapeInferenceTest, data_from_tensor_accessor_1) const auto data = std::make_shared(element::string, ov::PartialShape::dynamic()); const auto op = make_op(data); std::string data_val[] = {"Intel", "OpenVINO"}; - auto const_inputs = std::unordered_map{{0, {element::string, Shape{2}, data_val}}}; + auto const_inputs = std::unordered_map{{0, {element::string, ov::Shape{2}, data_val}}}; - const auto input_shapes = ShapeVector{Shape{2}}; + const auto input_shapes = StaticShapeVector{ov::Shape{2}}; auto shape_infer = make_shape_inference(op); const auto input_shape_refs = make_static_shape_refs(input_shapes); const auto output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor(const_inputs)); @@ -48,9 +48,9 @@ TEST_F(StringTensorUnpackStaticShapeInferenceTest, data_from_tensor_accessor_2) const auto data = std::make_shared(element::string, ov::PartialShape::dynamic()); const auto op = make_op(data); std::string data_val[] = {"Intel Corp", " ", "Open VINO", "", "Artificial Intelligence"}; - auto const_inputs = std::unordered_map{{0, {element::string, Shape{5}, data_val}}}; + auto const_inputs = std::unordered_map{{0, {element::string, ov::Shape{5}, data_val}}}; - const auto input_shapes = ShapeVector{Shape{5}}; + const auto input_shapes = StaticShapeVector{ov::Shape{5}}; auto shape_infer = make_shape_inference(op); const auto input_shape_refs = make_static_shape_refs(input_shapes); const auto output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor(const_inputs)); @@ -64,9 +64,9 @@ TEST_F(StringTensorUnpackStaticShapeInferenceTest, data_from_tensor_accessor_3) const auto data = std::make_shared(element::string, ov::PartialShape::dynamic()); const auto op = make_op(data); std::string data_val[] = {"Intel", "OpenVINO", "AI", "Edge", "Compute", "Vision", "Neural", "Networks"}; - auto const_inputs = std::unordered_map{{0, {element::string, Shape{2, 2, 2}, data_val}}}; + auto const_inputs = std::unordered_map{{0, {element::string, ov::Shape{2, 2, 2}, data_val}}}; - const auto input_shapes = ShapeVector{Shape{2, 2, 2}}; + const auto input_shapes = StaticShapeVector{ov::Shape{2, 2, 2}}; auto shape_infer = make_shape_inference(op); const auto input_shape_refs = make_static_shape_refs(input_shapes); const auto output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor(const_inputs)); @@ -80,9 +80,9 @@ TEST_F(StringTensorUnpackStaticShapeInferenceTest, data_from_tensor_accessor_4) const auto data = std::make_shared(element::string, ov::PartialShape::dynamic()); const auto op = make_op(data); std::string data_val[] = {"In@tel", "Open#VINO", "A$I"}; - auto const_inputs = std::unordered_map{{0, {element::string, Shape{1, 3}, data_val}}}; + auto const_inputs = std::unordered_map{{0, {element::string, ov::Shape{1, 3}, data_val}}}; - const auto input_shapes = ShapeVector{Shape{1, 3}}; + const auto input_shapes = StaticShapeVector{ov::Shape{1, 3}}; auto shape_infer = make_shape_inference(op); const auto input_shape_refs = make_static_shape_refs(input_shapes); const auto output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor(const_inputs)); @@ -99,7 +99,7 @@ TEST_P(StringTensorUnpackStaticTestSuite, StringTensorUnpackStaticShapeInference const auto data = std::make_shared(element::string, input_shape, input_strings); const auto 
op = std::make_shared(data); - const auto input_shapes = ShapeVector{input_shape}; + const auto input_shapes = StaticShapeVector{input_shape}; auto shape_infer = make_shape_inference(op); const auto input_shape_refs = make_static_shape_refs(input_shapes); const auto output_shapes = *shape_infer->infer(input_shape_refs, make_tensor_accessor()); @@ -115,44 +115,24 @@ INSTANTIATE_TEST_SUITE_P( StringTensorUnpackStaticTestSuite, ::testing::Values( // single string - std::make_tuple( - std::vector{"Intel"}, - Shape{1}), + std::make_tuple(std::vector{"Intel"}, ov::Shape{1}), // multiple strings - std::make_tuple( - std::vector{"Intel", "OpenVINO", "AI"}, - Shape{3}), + std::make_tuple(std::vector{"Intel", "OpenVINO", "AI"}, ov::Shape{3}), // empty string - std::make_tuple( - std::vector{""}, - Shape{1}), + std::make_tuple(std::vector{""}, ov::Shape{1}), // strings with special characters - std::make_tuple( - std::vector{"In@tel", "Open#VINO", "A$I"}, - Shape{3}), + std::make_tuple(std::vector{"In@tel", "Open#VINO", "A$I"}, ov::Shape{3}), // strings with spaces and an empty string - std::make_tuple( - std::vector{"Intel Corp", " ", "Open VINO", "", "Artificial Intelligence"}, - Shape{1, 5}), + std::make_tuple(std::vector{"Intel Corp", " ", "Open VINO", "", "Artificial Intelligence"}, + ov::Shape{1, 5}), // empty vector - std::make_tuple( - std::vector{}, - Shape{0}), + std::make_tuple(std::vector{}, ov::Shape{0}), // different shapes - std::make_tuple( - std::vector{"Intel", "OpenVINO", "AI", "Edge"}, - Shape{2, 2}), - std::make_tuple( - std::vector{"Intel", "OpenVINO", "AI", "Edge", "Compute", "Vision"}, - Shape{2, 3}), + std::make_tuple(std::vector{"Intel", "OpenVINO", "AI", "Edge"}, ov::Shape{2, 2}), + std::make_tuple(std::vector{"Intel", "OpenVINO", "AI", "Edge", "Compute", "Vision"}, + ov::Shape{2, 3}), std::make_tuple( std::vector{"Intel", "OpenVINO", "AI", "Edge", "Compute", "Vision", "Neural", "Networks"}, - Shape{2, 2, 2}), - std::make_tuple( - std::vector{"Intel", "OpenVINO", "AI", "Edge"}, - Shape{1, 4}), - std::make_tuple( - std::vector{"Intel"}, - Shape{1, 1}) - ) -); + ov::Shape{2, 2, 2}), + std::make_tuple(std::vector{"Intel", "OpenVINO", "AI", "Edge"}, ov::Shape{1, 4}), + std::make_tuple(std::vector{"Intel"}, ov::Shape{1, 1}))); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/tile_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/tile_shape_inference_test.cpp index 011a3ef59f9630..aaaf6f504362c8 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/tile_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/tile_shape_inference_test.cpp @@ -25,7 +25,7 @@ TEST(StaticShapeInferenceTest, TileTest) { TEST(StaticShapeInferenceTest, TileFewRepeatsTest) { auto param0 = std::make_shared(element::f32, PartialShape{-1, -1, -1}); - auto param1 = ov::op::v0::Constant::create(element::i64, Shape{2}, {4, 1}); + auto param1 = ov::op::v0::Constant::create(element::i64, ov::Shape{2}, {4, 1}); auto tile = std::make_shared(param0, param1); // Test Static Shape std::vector static_input_shapes = {StaticShape{6, 8, 10}, StaticShape{2}}; @@ -35,7 +35,7 @@ TEST(StaticShapeInferenceTest, TileFewRepeatsTest) { TEST(StaticShapeInferenceTest, TileSmallDataRankTest) { auto param0 = std::make_shared(element::f32, PartialShape{-1, -1}); - auto param1 = ov::op::v0::Constant::create(element::i64, Shape{3}, {3, 4, 1}); + auto param1 = ov::op::v0::Constant::create(element::i64, ov::Shape{3}, {3, 4, 1}); auto tile = 
std::make_shared(param0, param1); // Test Static Shape std::vector static_input_shapes = {StaticShape{8, 10}, StaticShape{3}}; @@ -49,10 +49,10 @@ TEST(StaticShapeInferenceTest, TileSmallDataRankTestRepeatsInConstMap) { auto tile = std::make_shared(param0, param1); int32_t repeats[] = {3, 4, 1}; - const auto constant_data = std::unordered_map{{1, {element::i32, Shape{3}, repeats}}}; + const auto constant_data = std::unordered_map{{1, {element::i32, ov::Shape{3}, repeats}}}; // Test Static Shape - ShapeVector input_shapes = {StaticShape{8, 10}, StaticShape{3}}, output_shapes = {StaticShape{}}; + StaticShapeVector input_shapes = {StaticShape{8, 10}, StaticShape{3}}, output_shapes = {StaticShape{}}; output_shapes = shape_inference(tile.get(), input_shapes, constant_data); ASSERT_EQ(output_shapes.front(), StaticShape({3, 32, 10})); @@ -60,7 +60,7 @@ TEST(StaticShapeInferenceTest, TileSmallDataRankTestRepeatsInConstMap) { TEST(StaticShapeInferenceTest, TileStaticShapeRepeatsAsConst) { auto param0 = std::make_shared(element::f32, PartialShape::dynamic(3)); - auto param1 = ov::op::v0::Constant::create(element::i64, Shape{2}, {4, 1}); + auto param1 = ov::op::v0::Constant::create(element::i64, ov::Shape{2}, {4, 1}); auto tile = std::make_shared(param0, param1); auto dims = std::vector{{6, 8, 10}, {2}}; @@ -81,7 +81,7 @@ TEST(StaticShapeInferenceTest, TileNewApiInputsStaticRank) { auto tile = std::make_shared(param0, param1); int32_t repeats[] = {3, 4, 1, 2}; - const auto constant_data = std::unordered_map{{1, {element::i32, Shape{4}, repeats}}}; + const auto constant_data = std::unordered_map{{1, {element::i32, ov::Shape{4}, repeats}}}; auto dims = std::vector{{8, 10}, {4}}; auto in_shapes = std::vector(dims.begin(), dims.end()); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/topk_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/topk_shape_inference_test.cpp index 9f1ed780153cfa..37f5d315af9784 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/topk_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/topk_shape_inference_test.cpp @@ -15,10 +15,10 @@ using namespace ov::opset10; using namespace testing; namespace topk_test { -using TopKTestParams = std::tuple; template class TopKTest : public OpStaticShapeInferenceTest, public WithParamInterface { @@ -30,9 +30,9 @@ class TopKTest : public OpStaticShapeInferenceTest, public WithParamInterfa int64_t axis, k; }; -const auto TopkTestValues = Values(make_tuple(ShapeVector{{0}, {}}, 0, 1, StaticShape{1}), - make_tuple(ShapeVector{{5, 2, 10, 0}, {}}, -1, 5, StaticShape{5, 2, 10, 5}), - make_tuple(ShapeVector{{3, 5, 6}, {}}, 1, 2, StaticShape{3, 2, 6})); +const auto TopkTestValues = Values(make_tuple(StaticShapeVector{{0}, {}}, 0, 1, StaticShape{1}), + make_tuple(StaticShapeVector{{5, 2, 10, 0}, {}}, -1, 5, StaticShape{5, 2, 10, 5}), + make_tuple(StaticShapeVector{{3, 5, 6}, {}}, 1, 2, StaticShape{3, 2, 6})); namespace v1 { using TopKV1AssertStaticShapeInferenceTest = OpStaticShapeInferenceTest; @@ -43,11 +43,11 @@ TEST_F(TopKV1AssertStaticShapeInferenceTest, k_is_negative) { const auto op = make_op(data, k_node, 0, "max", "value"); - input_shapes = ShapeVector{{5, 2}, {}}; - output_shapes = ShapeVector(2); + input_shapes = StaticShapeVector{{5, 2}, {}}; + output_shapes = StaticShapeVector(2); int64_t k = -2; - const auto const_map = std::unordered_map{{1, {element::i64, Shape{}, &k}}}; + const auto const_map = std::unordered_map{{1, {element::i64, 
ov::Shape{}, &k}}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, const_map), ov::AssertFailure, @@ -59,7 +59,7 @@ INSTANTIATE_TEST_SUITE_P(StaticShapeInference, TopKV1Test, TopkTestValues, Print TEST_P(TopKV1Test, no_constant_map) { const auto data = std::make_shared(element::f32, PartialShape::dynamic()); - const auto k_node = Constant::create(element::i64, Shape{}, {k}); + const auto k_node = Constant::create(element::i64, ov::Shape{}, {k}); const auto op = make_op(data, k_node, axis, "max", "value"); @@ -84,7 +84,7 @@ TEST_P(TopKV1Test, k_as_param_in_const_map) { const auto data = std::make_shared(element::f32, PartialShape::dynamic()); const auto k_node = std::make_shared(element::i64, PartialShape::dynamic()); - const auto const_map = std::unordered_map{{1, {element::i64, Shape{}, &k}}}; + const auto const_map = std::unordered_map{{1, {element::i64, ov::Shape{}, &k}}}; const auto op = make_op(data, k_node, axis, "min", "value"); @@ -104,11 +104,11 @@ TEST_F(TopKV3AssertStaticShapeInferenceTest, k_is_negative) { const auto op = make_op(data, k_node, 0, "max", "value"); - input_shapes = ShapeVector{{5, 2}, {}}; - output_shapes = ShapeVector(2); + input_shapes = StaticShapeVector{{5, 2}, {}}; + output_shapes = StaticShapeVector(2); int64_t k = -2; - const auto const_map = std::unordered_map{{1, {element::i64, Shape{}, &k}}}; + const auto const_map = std::unordered_map{{1, {element::i64, ov::Shape{}, &k}}}; OV_EXPECT_THROW(shape_inference(op.get(), input_shapes, const_map), ov::AssertFailure, @@ -120,7 +120,7 @@ INSTANTIATE_TEST_SUITE_P(StaticShapeInference, TopKV3Test, TopkTestValues, Print TEST_P(TopKV3Test, k_as_constant) { const auto data = std::make_shared(element::f32, PartialShape::dynamic()); - const auto k_node = Constant::create(element::i64, Shape{}, {k}); + const auto k_node = Constant::create(element::i64, ov::Shape{}, {k}); const auto op = make_op(data, k_node, axis, "min", "value"); @@ -145,7 +145,7 @@ TEST_P(TopKV3Test, k_as_param_in_const_map) { const auto data = std::make_shared(element::f32, PartialShape::dynamic()); const auto k_node = std::make_shared(element::i64, PartialShape::dynamic()); - const auto const_map = std::unordered_map{{1, {element::i64, Shape{}, &k}}}; + const auto const_map = std::unordered_map{{1, {element::i64, ov::Shape{}, &k}}}; const auto op = make_op(data, k_node, axis, "max", "value"); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/transpose_shape_infernece_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/transpose_shape_infernece_test.cpp index 8ade2005186e66..09ada256332408 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/transpose_shape_infernece_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/transpose_shape_infernece_test.cpp @@ -87,7 +87,7 @@ TEST(StaticShapeInferenceTest, transpose_input_shape_dim_dynamic) { TEST(StaticShapeInferenceTest, transpose_order_in_constant_map) { const auto input_shape = PartialShape{2, 4, 6, 8}; const auto input = std::make_shared(element::f32, input_shape); - const auto order = std::make_shared(element::i64, Shape{4}); + const auto order = std::make_shared(element::i64, ov::Shape{4}); const auto transpose = std::make_shared(input, order); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/unsqueeze_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/unsqueeze_shape_inference_test.cpp index fbb086f93be8af..014d974e0b995c 100644 --- 
a/src/plugins/intel_cpu/tests/unit/shape_inference_test/unsqueeze_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/unsqueeze_shape_inference_test.cpp @@ -17,7 +17,7 @@ using namespace testing; class UnsqueezeStaticShapeInferenceAssertTest : public OpStaticShapeInferenceTest { protected: void SetUp() override { - output_shapes = ShapeVector(1); + output_shapes = StaticShapeVector(1); } }; @@ -26,7 +26,7 @@ TEST_F(UnsqueezeStaticShapeInferenceAssertTest, no_axes) { const auto axes = std::make_shared(element::i64, PartialShape{1}); op = std::make_shared(arg, axes); - input_shapes = ShapeVector{{5, 6}, axes->get_shape()}; + input_shapes = StaticShapeVector{{5, 6}, axes->get_shape()}; try { output_shapes = shape_inference(op.get(), input_shapes); @@ -39,8 +39,8 @@ TEST_F(UnsqueezeStaticShapeInferenceAssertTest, no_axes) { } TEST_F(UnsqueezeStaticShapeInferenceAssertTest, empty_axes) { - const auto arg = std::make_shared(element::f64, Shape{5, 6}); - const auto axes = std::make_shared(element::i64, Shape{0}, std::vector{}); + const auto arg = std::make_shared(element::f64, ov::Shape{5, 6}); + const auto axes = std::make_shared(element::i64, ov::Shape{0}, std::vector{}); try { op = std::make_shared(arg, axes); @@ -52,7 +52,7 @@ TEST_F(UnsqueezeStaticShapeInferenceAssertTest, empty_axes) { } } -using TestParams = std::tuple, // Unsqueeze axes StaticShape // Expected shape >; @@ -64,7 +64,7 @@ class UnsqueezeStaticShapeInferenceTest : public UnsqueezeStaticShapeInferenceAs UnsqueezeStaticShapeInferenceAssertTest::SetUp(); std::tie(input_shapes, axes, exp_shape) = GetParam(); - output_shapes = ShapeVector(1); + output_shapes = StaticShapeVector(1); arg = std::make_shared(element::f32, input_shapes.front().get_shape()); } @@ -74,42 +74,45 @@ class UnsqueezeStaticShapeInferenceTest : public UnsqueezeStaticShapeInferenceAs INSTANTIATE_TEST_SUITE_P(1d_shapes, UnsqueezeStaticShapeInferenceTest, - Values(make_tuple(ShapeVector{{0}, {1}}, std::vector{-1}, StaticShape({0, 1})), - make_tuple(ShapeVector{{0}, {1}}, std::vector{0}, StaticShape({1, 0})), - make_tuple(ShapeVector{{1}, {1}}, std::vector{1}, StaticShape({1, 1})), - make_tuple(ShapeVector{{2}, {1}}, std::vector{0}, StaticShape({1, 2})), - make_tuple(ShapeVector{{2}, {1}}, std::vector{1}, StaticShape({2, 1})), - make_tuple(ShapeVector{{2}, {1}}, std::vector{-1}, StaticShape({2, 1})), - make_tuple(ShapeVector{{2}, {1}}, std::vector{-2}, StaticShape({1, 2}))), + Values(make_tuple(StaticShapeVector{{0}, {1}}, std::vector{-1}, StaticShape({0, 1})), + make_tuple(StaticShapeVector{{0}, {1}}, std::vector{0}, StaticShape({1, 0})), + make_tuple(StaticShapeVector{{1}, {1}}, std::vector{1}, StaticShape({1, 1})), + make_tuple(StaticShapeVector{{2}, {1}}, std::vector{0}, StaticShape({1, 2})), + make_tuple(StaticShapeVector{{2}, {1}}, std::vector{1}, StaticShape({2, 1})), + make_tuple(StaticShapeVector{{2}, {1}}, std::vector{-1}, StaticShape({2, 1})), + make_tuple(StaticShapeVector{{2}, {1}}, std::vector{-2}, StaticShape({1, 2}))), PrintToStringParamName()); INSTANTIATE_TEST_SUITE_P( multi_dim_shapes, UnsqueezeStaticShapeInferenceTest, - Values(make_tuple(ShapeVector{{2, 3}, {2}}, std::vector{0, 3}, StaticShape({1, 2, 3, 1})), - make_tuple(ShapeVector{{2, 4}, {2}}, std::vector{2, 1}, StaticShape({2, 1, 1, 4})), - make_tuple(ShapeVector{{3, 2}, {3}}, std::vector{0, 2, 4}, StaticShape({1, 3, 1, 2, 1})), - make_tuple(ShapeVector{{3, 2}, {3}}, std::vector{4, 2, 0}, StaticShape({1, 3, 1, 2, 1})), - make_tuple(ShapeVector{{3, 2}, 
{3}}, std::vector{2, 0, 4}, StaticShape({1, 3, 1, 2, 1})), - make_tuple(ShapeVector{{10, 0, 3}, {4}}, - std::vector{1, -1, 3, -2}, - StaticShape({10, 1, 0, 1, 3, 1, 1})), - make_tuple(ShapeVector{{2, 6, 7, 8, 3}, {1}}, std::vector{0}, StaticShape({1, 2, 6, 7, 8, 3}))), + Values( + make_tuple(StaticShapeVector{{2, 3}, {2}}, std::vector{0, 3}, StaticShape({1, 2, 3, 1})), + make_tuple(StaticShapeVector{{2, 4}, {2}}, std::vector{2, 1}, StaticShape({2, 1, 1, 4})), + make_tuple(StaticShapeVector{{3, 2}, {3}}, std::vector{0, 2, 4}, StaticShape({1, 3, 1, 2, 1})), + make_tuple(StaticShapeVector{{3, 2}, {3}}, std::vector{4, 2, 0}, StaticShape({1, 3, 1, 2, 1})), + make_tuple(StaticShapeVector{{3, 2}, {3}}, std::vector{2, 0, 4}, StaticShape({1, 3, 1, 2, 1})), + make_tuple(StaticShapeVector{{10, 0, 3}, {4}}, + std::vector{1, -1, 3, -2}, + StaticShape({10, 1, 0, 1, 3, 1, 1})), + make_tuple(StaticShapeVector{{2, 6, 7, 8, 3}, {1}}, std::vector{0}, StaticShape({1, 2, 6, 7, 8, 3}))), PrintToStringParamName()); INSTANTIATE_TEST_SUITE_P( multi_dim_shapes_repeated_axis, UnsqueezeStaticShapeInferenceTest, Values( - make_tuple(ShapeVector{{2, 3}, {2}}, std::vector{1, 1}, StaticShape({2, 1, 3})), - make_tuple(ShapeVector{{3, 2}, {3}}, std::vector{1, -1, 1}, StaticShape({3, 1, 2, 1})), - make_tuple(ShapeVector{{3, 2}, {3}}, std::vector{1, -1, 1, -1}, StaticShape({3, 1, 2, 1})), - make_tuple(ShapeVector{{3, 2}, {3}}, std::vector{2, -1, 2, -1, 0}, StaticShape({1, 3, 1, 2, 1})), - make_tuple(ShapeVector{{2, 6, 7, 8, 3}, {2}}, std::vector{-1, -1}, StaticShape({2, 6, 7, 8, 3, 1}))), + make_tuple(StaticShapeVector{{2, 3}, {2}}, std::vector{1, 1}, StaticShape({2, 1, 3})), + make_tuple(StaticShapeVector{{3, 2}, {3}}, std::vector{1, -1, 1}, StaticShape({3, 1, 2, 1})), + make_tuple(StaticShapeVector{{3, 2}, {3}}, std::vector{1, -1, 1, -1}, StaticShape({3, 1, 2, 1})), + make_tuple(StaticShapeVector{{3, 2}, {3}}, std::vector{2, -1, 2, -1, 0}, StaticShape({1, 3, 1, 2, 1})), + make_tuple(StaticShapeVector{{2, 6, 7, 8, 3}, {2}}, + std::vector{-1, -1}, + StaticShape({2, 6, 7, 8, 3, 1}))), PrintToStringParamName()); TEST_P(UnsqueezeStaticShapeInferenceTest, shape_inference_empty_const_map) { - const auto axes_node = std::make_shared(element::i64, Shape{axes.size()}, axes); + const auto axes_node = std::make_shared(element::i64, ov::Shape{axes.size()}, axes); op = std::make_shared(arg, axes_node); output_shapes = shape_inference(op.get(), input_shapes); @@ -118,7 +121,7 @@ TEST_P(UnsqueezeStaticShapeInferenceTest, shape_inference_empty_const_map) { } TEST_P(UnsqueezeStaticShapeInferenceTest, shape_inference_with_const_map) { - const auto axes_node = std::make_shared(element::i64, Shape{1}); + const auto axes_node = std::make_shared(element::i64, ov::Shape{1}); op = std::make_shared(arg, axes_node); const auto axes_tensor = ov::Tensor(element::i64, ov::Shape{axes.size()}, axes.data()); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/utils.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/utils.cpp index 96949c52b8dfe1..99e51b2b8cce03 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/utils.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/utils.cpp @@ -6,7 +6,7 @@ namespace ov { namespace intel_cpu { -std::vector make_static_shape_refs(const ShapeVector& shapes) { +std::vector make_static_shape_refs(const StaticShapeVector& shapes) { std::vector out; out.reserve(shapes.size()); for (auto& s : shapes) { @@ -15,9 +15,9 @@ std::vector make_static_shape_refs(const ShapeVector& 
shapes) { return out; } -ShapeVector shape_inference(ov::Node* op, - const ShapeVector& input_shapes, - const std::unordered_map& constant_data) { +StaticShapeVector shape_inference(ov::Node* op, + const StaticShapeVector& input_shapes, + const std::unordered_map& constant_data) { const auto in_shapes = intel_cpu::make_static_shape_refs(input_shapes); const auto shape_infer = intel_cpu::make_shape_inference(op->shared_from_this()); auto result = shape_infer->infer(in_shapes, make_tensor_accessor(constant_data)); diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/utils.hpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/utils.hpp index 43c23941b5c08c..99619e42c307d3 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/utils.hpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/utils.hpp @@ -14,20 +14,20 @@ namespace ov { namespace intel_cpu { -using ShapeVector = std::vector; +using StaticShapeVector = std::vector; -std::vector make_static_shape_refs(const ShapeVector& shapes); +std::vector make_static_shape_refs(const StaticShapeVector& shapes); -ShapeVector shape_inference(ov::Node* op, - const ShapeVector& input_shapes, - const std::unordered_map& constant_data = {}); +StaticShapeVector shape_inference(ov::Node* op, + const StaticShapeVector& input_shapes, + const std::unordered_map& constant_data = {}); template class OpStaticShapeInferenceTest : public testing::Test { protected: using op_type = TOp; - ShapeVector input_shapes, output_shapes; + StaticShapeVector input_shapes, output_shapes; ov::intel_cpu::StaticShape exp_shape; std::shared_ptr op; diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/variadic_split_shape_inference_tests.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/variadic_split_shape_inference_tests.cpp index 0a8005b7a11fff..422ea5d1b9d156 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/variadic_split_shape_inference_tests.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/variadic_split_shape_inference_tests.cpp @@ -14,10 +14,10 @@ using namespace ov; using namespace ov::intel_cpu; using namespace testing; -using VariadicSplitTestParams = std::tuple, // split lengths - ShapeVector // Expected shapes + StaticShapeVector // Expected shapes >; class VariadicSplitStaticShapeInferenceTest : public OpStaticShapeInferenceTest, @@ -26,13 +26,12 @@ class VariadicSplitStaticShapeInferenceTest : public OpStaticShapeInferenceTest< void SetUp() override { std::tie(input_shapes, axis, split_lengths, exp_shapes) = GetParam(); - output_shapes = ShapeVector(); data = std::make_shared(element::f32, input_shapes.front().get_shape()); } int64_t axis; std::vector split_lengths; - ShapeVector exp_shapes; + StaticShapeVector exp_shapes; std::shared_ptr data; }; @@ -40,39 +39,45 @@ class VariadicSplitStaticShapeInferenceTest : public OpStaticShapeInferenceTest< INSTANTIATE_TEST_SUITE_P( 1d_shapes, VariadicSplitStaticShapeInferenceTest, - Values(make_tuple(ShapeVector{{0}, {}, {1}}, 0, std::vector{0}, ShapeVector{{0}}), - make_tuple(ShapeVector{{15}, {}, {3}}, -1, std::vector{2, 3, 10}, ShapeVector{{2}, {3}, {10}}), - make_tuple(ShapeVector{{15}, {}, {3}}, 0, std::vector{5, -1, 2}, ShapeVector{{5}, {8}, {2}})), + Values(make_tuple(StaticShapeVector{{0}, {}, {1}}, 0, std::vector{0}, StaticShapeVector{{0}}), + make_tuple(StaticShapeVector{{15}, {}, {3}}, + -1, + std::vector{2, 3, 10}, + StaticShapeVector{{2}, {3}, {10}}), + make_tuple(StaticShapeVector{{15}, {}, {3}}, + 0, + 
std::vector{5, -1, 2}, + StaticShapeVector{{5}, {8}, {2}})), PrintToStringParamName()); INSTANTIATE_TEST_SUITE_P(multi_dim_shapes, VariadicSplitStaticShapeInferenceTest, - Values(make_tuple(ShapeVector{{2, 6, 5}, {}, {3}}, + Values(make_tuple(StaticShapeVector{{2, 6, 5}, {}, {3}}, 2, std::vector{2, 1, 2}, - ShapeVector{{2, 6, 2}, {2, 6, 1}, {2, 6, 2}}), - make_tuple(ShapeVector{{2, 6, 5}, {}, {2}}, + StaticShapeVector{{2, 6, 2}, {2, 6, 1}, {2, 6, 2}}), + make_tuple(StaticShapeVector{{2, 6, 5}, {}, {2}}, -2, std::vector{2, 4}, - ShapeVector{{2, 2, 5}, {2, 4, 5}}), - make_tuple(ShapeVector{{4, 6, 5}, {}, {3}}, + StaticShapeVector{{2, 2, 5}, {2, 4, 5}}), + make_tuple(StaticShapeVector{{4, 6, 5}, {}, {3}}, 0, std::vector{-1, 3, 1}, - ShapeVector{{0, 6, 5}, {3, 6, 5}, {1, 6, 5}}), - make_tuple(ShapeVector{{4, 6, 5}, {}, {3}}, + StaticShapeVector{{0, 6, 5}, {3, 6, 5}, {1, 6, 5}}), + make_tuple(StaticShapeVector{{4, 6, 5}, {}, {3}}, 0, std::vector{3, -1, 1}, - ShapeVector{{3, 6, 5}, {0, 6, 5}, {1, 6, 5}}), - make_tuple(ShapeVector{{4, 6, 5}, {}, {3}}, + StaticShapeVector{{3, 6, 5}, {0, 6, 5}, {1, 6, 5}}), + make_tuple(StaticShapeVector{{4, 6, 5}, {}, {3}}, 0, std::vector{3, 1, -1}, - ShapeVector{{3, 6, 5}, {1, 6, 5}, {0, 6, 5}})), + StaticShapeVector{{3, 6, 5}, {1, 6, 5}, {0, 6, 5}})), PrintToStringParamName()); TEST_P(VariadicSplitStaticShapeInferenceTest, shape_inference_empty_const_map) { - const auto axis_node = std::make_shared(element::i64, Shape{}, axis); + const auto axis_node = std::make_shared(element::i64, ov::Shape{}, axis); const auto split_len_node = - std::make_shared(element::i64, Shape{split_lengths.size()}, split_lengths); + std::make_shared(element::i64, ov::Shape{split_lengths.size()}, split_lengths); op = make_op(data, axis_node, split_len_node); output_shapes = shape_inference(op.get(), input_shapes); @@ -84,7 +89,7 @@ TEST_P(VariadicSplitStaticShapeInferenceTest, shape_inference_empty_const_map) { TEST_P(VariadicSplitStaticShapeInferenceTest, shape_inference_axis_in_const_map) { const auto axis_node = std::make_shared(element::i64, ov::PartialShape::dynamic()); const auto split_len_node = - std::make_shared(element::i64, Shape{split_lengths.size()}, split_lengths); + std::make_shared(element::i64, ov::Shape{split_lengths.size()}, split_lengths); op = make_op(data, axis_node, split_len_node); const auto axis_tensor = ov::Tensor(element::i64, ov::Shape{}, &axis); @@ -101,8 +106,8 @@ TEST_P(VariadicSplitStaticShapeInferenceTest, shape_inference_all_const_in_map) const auto split_len_node = std::make_shared(element::i64, ov::PartialShape::dynamic()); op = make_op(data, axis_node, split_len_node); - const auto axis_tensor = ov::Tensor(element::i64, Shape{}, &axis); - const auto split_len_tensor = ov::Tensor(element::i64, Shape{split_lengths.size()}, split_lengths.data()); + const auto axis_tensor = ov::Tensor(element::i64, ov::Shape{}, &axis); + const auto split_len_tensor = ov::Tensor(element::i64, ov::Shape{split_lengths.size()}, split_lengths.data()); const auto constant_data = std::unordered_map{{2, split_len_tensor}, {1, axis_tensor}}; diff --git a/src/plugins/intel_cpu/tests/unit/streams_info/streams_info_table_test.cpp b/src/plugins/intel_cpu/tests/unit/streams_info/streams_info_table_test.cpp index 908c8802981ab8..57f9f5c5d72a14 100644 --- a/src/plugins/intel_cpu/tests/unit/streams_info/streams_info_table_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/streams_info/streams_info_table_test.cpp @@ -158,7 +158,11 @@ StreamsCalculationTestCase 
_2sockets_104cores_latency_platform_3 = { {52, 26, 0, 26, 1, 0}, {52, 26, 0, 26, 2, 1}, {52, 26, 0, 26, 3, 1}}, - {{1, ALL_PROC, 52, 0, 0}, {0, MAIN_CORE_PROC, 26, 0, 0}, {0, HYPER_THREADING_PROC, 26, 0, 0}}, + {{1, ALL_PROC, 104, -1, 0}, + {0, MAIN_CORE_PROC, 26, 0, 0}, + {0, MAIN_CORE_PROC, 26, 1, 0}, + {0, HYPER_THREADING_PROC, 26, 0, 0}, + {0, HYPER_THREADING_PROC, 26, 1, 0}}, }; StreamsCalculationTestCase _2sockets_104cores_latency_platform_4 = { 1, @@ -170,7 +174,7 @@ StreamsCalculationTestCase _2sockets_104cores_latency_platform_4 = { "LATENCY", {}, {{104, 104, 0, 0, -1, -1}, {26, 26, 0, 0, 0, 0}, {26, 26, 0, 0, 1, 0}, {26, 26, 0, 0, 2, 1}, {26, 26, 0, 0, 3, 1}}, - {{1, MAIN_CORE_PROC, 26, 0, 0}}, + {{1, ALL_PROC, 52, -1, 0}, {0, MAIN_CORE_PROC, 26, 0, 0}, {0, MAIN_CORE_PROC, 26, 1, 0}}, }; StreamsCalculationTestCase _2sockets_104cores_latency_socket_1 = { 1, @@ -210,7 +214,11 @@ StreamsCalculationTestCase _2sockets_104cores_latency_socket_3 = { {52, 26, 0, 26, 1, 0}, {52, 26, 0, 26, 2, 1}, {52, 26, 0, 26, 3, 1}}, - {{1, ALL_PROC, 52, 0, 0}, {0, MAIN_CORE_PROC, 26, 0, 0}, {0, HYPER_THREADING_PROC, 26, 0, 0}}, + {{1, ALL_PROC, 104, -1, 0}, + {0, MAIN_CORE_PROC, 26, 0, 0}, + {0, MAIN_CORE_PROC, 26, 1, 0}, + {0, HYPER_THREADING_PROC, 26, 0, 0}, + {0, HYPER_THREADING_PROC, 26, 1, 0}}, }; StreamsCalculationTestCase _2sockets_104cores_latency_socket_4 = { 1, @@ -222,7 +230,7 @@ StreamsCalculationTestCase _2sockets_104cores_latency_socket_4 = { "LATENCY", {}, {{104, 104, 0, 0, -1, -1}, {26, 26, 0, 0, 0, 0}, {26, 26, 0, 0, 1, 0}, {26, 26, 0, 0, 2, 1}, {26, 26, 0, 0, 3, 1}}, - {{1, MAIN_CORE_PROC, 26, 0, 0}}, + {{1, ALL_PROC, 52, -1, 0}, {0, MAIN_CORE_PROC, 26, 0, 0}, {0, MAIN_CORE_PROC, 26, 1, 0}}, }; StreamsCalculationTestCase _2sockets_104cores_latency_socket_5 = { 1, @@ -234,7 +242,7 @@ StreamsCalculationTestCase _2sockets_104cores_latency_socket_5 = { "LATENCY", {}, {{60, 60, 0, 0, -1, -1}, {10, 10, 0, 0, 0, 0}, {10, 10, 0, 0, 1, 0}, {20, 20, 0, 0, 2, 1}, {20, 20, 0, 0, 3, 1}}, - {{1, MAIN_CORE_PROC, 10, 0, 0}}, + {{1, ALL_PROC, 20, -1, 0}, {0, MAIN_CORE_PROC, 10, 0, 0}, {0, MAIN_CORE_PROC, 10, 1, 0}}, }; StreamsCalculationTestCase _2sockets_104cores_latency_socket_6 = { 1, @@ -246,7 +254,7 @@ StreamsCalculationTestCase _2sockets_104cores_latency_socket_6 = { "LATENCY", {}, {{60, 60, 0, 0, -1, -1}, {10, 10, 0, 0, 0, 0}, {20, 20, 0, 0, 1, 1}, {10, 10, 0, 0, 2, 0}, {20, 20, 0, 0, 3, 1}}, - {{1, MAIN_CORE_PROC, 10, 0, 0}}, + {{1, ALL_PROC, 20, -1, 0}, {0, MAIN_CORE_PROC, 10, 0, 0}, {0, MAIN_CORE_PROC, 10, 2, 0}}, }; StreamsCalculationTestCase _2sockets_104cores_latency_socket_7 = { 1, @@ -258,7 +266,7 @@ StreamsCalculationTestCase _2sockets_104cores_latency_socket_7 = { "LATENCY", {}, {{104, 104, 0, 0, -1, -1}, {26, 26, 0, 0, 0, 0}, {26, 26, 0, 0, 1, 0}, {26, 26, 0, 0, 2, 1}, {26, 26, 0, 0, 3, 1}}, - {{1, MAIN_CORE_PROC, 26, 0, 0}}, + {{1, ALL_PROC, 52, -1, 0}, {0, MAIN_CORE_PROC, 26, 0, 0}, {0, MAIN_CORE_PROC, 26, 1, 0}}, }; StreamsCalculationTestCase _2sockets_104cores_latency_socket_8 = { 1, @@ -2349,7 +2357,11 @@ StreamsCalculationTestCase _2sockets_mock_latency_26 = { {60, 30, 0, 30, 1, 0}, {40, 20, 0, 20, 2, 1}, {20, 10, 0, 10, 3, 1}}, - {{1, ALL_PROC, 80, 0, 0}, {0, MAIN_CORE_PROC, 40, 0, 0}, {0, HYPER_THREADING_PROC, 40, 0, 0}}, + {{1, ALL_PROC, 140, -1, 0}, + {0, MAIN_CORE_PROC, 40, 0, 0}, + {0, MAIN_CORE_PROC, 30, 1, 0}, + {0, HYPER_THREADING_PROC, 40, 0, 0}, + {0, HYPER_THREADING_PROC, 30, 1, 0}}, }; StreamsCalculationTestCase _2sockets_mock_latency_27 = { 1, @@ -2365,7 +2377,11 
@@ StreamsCalculationTestCase _2sockets_mock_latency_27 = { {60, 30, 0, 30, 1, 0}, {40, 20, 0, 20, 2, 1}, {20, 10, 0, 10, 3, 1}}, - {{1, ALL_PROC, 80, 0, 0}, {0, MAIN_CORE_PROC, 40, 0, 0}, {0, HYPER_THREADING_PROC, 40, 0, 0}}, + {{1, ALL_PROC, 140, -1, 0}, + {0, MAIN_CORE_PROC, 40, 0, 0}, + {0, MAIN_CORE_PROC, 30, 1, 0}, + {0, HYPER_THREADING_PROC, 40, 0, 0}, + {0, HYPER_THREADING_PROC, 30, 1, 0}}, }; StreamsCalculationTestCase _2sockets_mock_latency_28 = { 1, @@ -2635,7 +2651,7 @@ StreamsCalculationTestCase _2sockets_mock_latency_39 = { "LATENCY", {}, {{104, 104, 0, 0, -1, -1}, {26, 26, 0, 0, 0, 0}, {26, 26, 0, 0, 1, 0}, {26, 26, 0, 0, 2, 1}, {26, 26, 0, 0, 3, 1}}, - {{1, MAIN_CORE_PROC, 26, 0, 0}}, + {{1, ALL_PROC, 52, -1, 0}, {0, MAIN_CORE_PROC, 26, 0, 0}, {0, MAIN_CORE_PROC, 26, 1, 0}}, }; StreamsCalculationTestCase _2sockets_mock_latency_40 = { 1, @@ -2647,7 +2663,7 @@ StreamsCalculationTestCase _2sockets_mock_latency_40 = { "LATENCY", {}, {{104, 104, 0, 0, -1, -1}, {26, 26, 0, 0, 1, 0}, {26, 26, 0, 0, 2, 1}, {26, 26, 0, 0, 3, 1}, {26, 26, 0, 0, 0, 0}}, - {{1, MAIN_CORE_PROC, 26, 1, 0}}, + {{1, ALL_PROC, 52, -1, 0}, {0, MAIN_CORE_PROC, 26, 1, 0}, {0, MAIN_CORE_PROC, 26, 0, 0}}, }; StreamsCalculationTestCase _2sockets_mock_latency_41 = { 1, @@ -2659,7 +2675,7 @@ StreamsCalculationTestCase _2sockets_mock_latency_41 = { "LATENCY", {}, {{104, 104, 0, 0, -1, -1}, {26, 26, 0, 0, 2, 1}, {26, 26, 0, 0, 3, 1}, {26, 26, 0, 0, 0, 0}, {26, 26, 0, 0, 1, 0}}, - {{1, MAIN_CORE_PROC, 26, 2, 1}}, + {{1, ALL_PROC, 52, -1, 1}, {0, MAIN_CORE_PROC, 26, 2, 1}, {0, MAIN_CORE_PROC, 26, 3, 1}}, }; StreamsCalculationTestCase _2sockets_mock_latency_42 = { 1, @@ -2671,7 +2687,7 @@ StreamsCalculationTestCase _2sockets_mock_latency_42 = { "LATENCY", {}, {{104, 104, 0, 0, -1, -1}, {26, 26, 0, 0, 3, 1}, {26, 26, 0, 0, 0, 0}, {26, 26, 0, 0, 1, 0}, {26, 26, 0, 0, 2, 1}}, - {{1, MAIN_CORE_PROC, 26, 3, 1}}, + {{1, ALL_PROC, 52, -1, 1}, {0, MAIN_CORE_PROC, 26, 3, 1}, {0, MAIN_CORE_PROC, 26, 2, 1}}, }; StreamsCalculationTestCase _2sockets_mock_latency_43 = { 1, @@ -2687,7 +2703,11 @@ StreamsCalculationTestCase _2sockets_mock_latency_43 = { {52, 26, 0, 26, 1, 0}, {52, 26, 0, 26, 2, 1}, {52, 26, 0, 26, 3, 1}}, - {{1, ALL_PROC, 52, 0, 0}, {0, MAIN_CORE_PROC, 26, 0, 0}, {0, HYPER_THREADING_PROC, 26, 0, 0}}, + {{1, ALL_PROC, 104, -1, 0}, + {0, MAIN_CORE_PROC, 26, 0, 0}, + {0, MAIN_CORE_PROC, 26, 1, 0}, + {0, HYPER_THREADING_PROC, 26, 0, 0}, + {0, HYPER_THREADING_PROC, 26, 1, 0}}, }; StreamsCalculationTestCase _2sockets_mock_latency_44 = { 1, @@ -2703,7 +2723,11 @@ StreamsCalculationTestCase _2sockets_mock_latency_44 = { {52, 26, 0, 26, 0, 0}, {52, 26, 0, 26, 1, 0}, {52, 26, 0, 26, 2, 1}}, - {{1, ALL_PROC, 52, 3, 1}, {0, MAIN_CORE_PROC, 26, 3, 1}, {0, HYPER_THREADING_PROC, 26, 3, 1}}, + {{1, ALL_PROC, 104, -1, 1}, + {0, MAIN_CORE_PROC, 26, 3, 1}, + {0, MAIN_CORE_PROC, 26, 2, 1}, + {0, HYPER_THREADING_PROC, 26, 3, 1}, + {0, HYPER_THREADING_PROC, 26, 2, 1}}, }; StreamsCalculationTestCase _2sockets_mock_latency_45 = { 1, @@ -2715,7 +2739,7 @@ StreamsCalculationTestCase _2sockets_mock_latency_45 = { "LATENCY", {}, {{208, 208, 0, 0, -1, -1}, {52, 52, 0, 0, 0, 0}, {52, 52, 0, 0, 1, 0}, {52, 52, 0, 0, 2, 1}, {52, 52, 0, 0, 3, 1}}, - {{1, MAIN_CORE_PROC, 52, 0, 0}}, + {{1, ALL_PROC, 104, -1, 0}, {0, MAIN_CORE_PROC, 52, 0, 0}, {0, MAIN_CORE_PROC, 52, 1, 0}}, }; StreamsCalculationTestCase _2sockets_mock_latency_46 = { 1, @@ -2727,7 +2751,7 @@ StreamsCalculationTestCase _2sockets_mock_latency_46 = { "LATENCY", {}, {{208, 208, 0, 0, -1, 
-1}, {52, 52, 0, 0, 2, 1}, {52, 52, 0, 0, 3, 1}, {52, 52, 0, 0, 0, 0}, {52, 52, 0, 0, 1, 0}}, - {{1, MAIN_CORE_PROC, 52, 2, 1}}, + {{1, ALL_PROC, 104, -1, 1}, {0, MAIN_CORE_PROC, 52, 2, 1}, {0, MAIN_CORE_PROC, 52, 3, 1}}, }; StreamsCalculationTestCase _2sockets_mock_latency_47 = { 1, @@ -2743,7 +2767,11 @@ StreamsCalculationTestCase _2sockets_mock_latency_47 = { {104, 52, 0, 52, 1, 0}, {104, 52, 0, 52, 2, 1}, {104, 52, 0, 52, 3, 1}}, - {{1, ALL_PROC, 104, 0, 0}, {0, MAIN_CORE_PROC, 52, 0, 0}, {0, HYPER_THREADING_PROC, 52, 0, 0}}, + {{1, ALL_PROC, 208, -1, 0}, + {0, MAIN_CORE_PROC, 52, 0, 0}, + {0, MAIN_CORE_PROC, 52, 1, 0}, + {0, HYPER_THREADING_PROC, 52, 0, 0}, + {0, HYPER_THREADING_PROC, 52, 1, 0}}, }; StreamsCalculationTestCase _2sockets_mock_latency_48 = { 1, @@ -2759,7 +2787,11 @@ StreamsCalculationTestCase _2sockets_mock_latency_48 = { {104, 52, 0, 52, 0, 0}, {104, 52, 0, 52, 1, 0}, {104, 52, 0, 52, 2, 1}}, - {{1, ALL_PROC, 104, 3, 1}, {0, MAIN_CORE_PROC, 52, 3, 1}, {0, HYPER_THREADING_PROC, 52, 3, 1}}, + {{1, ALL_PROC, 208, -1, 1}, + {0, MAIN_CORE_PROC, 52, 3, 1}, + {0, MAIN_CORE_PROC, 52, 2, 1}, + {0, HYPER_THREADING_PROC, 52, 3, 1}, + {0, HYPER_THREADING_PROC, 52, 2, 1}}, }; StreamsCalculationTestCase _2sockets_mock_latency_49 = { 1, diff --git a/src/plugins/intel_gpu/src/graph/impls/onednn/deconvolution_onednn.cpp b/src/plugins/intel_gpu/src/graph/impls/onednn/deconvolution_onednn.cpp index 1d5707194c560d..a72f09207bd3a0 100644 --- a/src/plugins/intel_gpu/src/graph/impls/onednn/deconvolution_onednn.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/onednn/deconvolution_onednn.cpp @@ -26,7 +26,7 @@ static std::shared_ptr get_deconvol auto output_layout = impl_params.get_output_layout(); dnnl::memory::dims stride(prim->stride.begin(), prim->stride.end()); - dnnl::memory::dims dilation(input_layout.get_spatial_rank(), 1); + dnnl::memory::dims dilation(stride.size(), 1); dnnl::memory::dims pad_l(prim->pad.begin(), prim->pad.end()); dnnl::memory::dims pad_r(prim->pad.begin(), prim->pad.end()); @@ -49,6 +49,7 @@ static std::shared_ptr get_deconvol int64_t insert_count = static_cast(output_md.get_dims().size()) - 2 - stride.size(); if (insert_count > 0) { stride.insert(stride.end(), insert_count, 1); + dilation.insert(dilation.end(), insert_count, 0); pad_l.insert(pad_l.end(), insert_count, 0); pad_r.insert(pad_r.end(), insert_count, 0); } diff --git a/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp b/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp index ae12ed087bc02d..f87f9af5275722 100644 --- a/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp +++ b/src/plugins/intel_gpu/src/plugin/sync_infer_request.cpp @@ -465,21 +465,6 @@ void SyncInferRequest::wait() { iremote_tensor_ptr->copy_from(plugin_tensor.ptr); } } - } else if (!is_dynamic && is_remote_tensor_impl && output_memory) { - auto& stream = m_graph->get_network()->get_stream(); - auto user_mem = remote_tensor_impl_ptr->get_original_memory(); - if (user_mem->get_allocation_type() == cldnn::allocation_type::cl_mem - && output_memory->get_allocation_type() != cldnn::allocation_type::cl_mem) { - auto plugin_tensor = m_plugin_outputs.at(port_idx); - if (is_convert_required(plugin_tensor.ptr->get_element_type(), iremote_tensor_ptr->get_element_type())) { - auto& stream = m_graph->get_network()->get_stream(); - convert_and_copy(plugin_tensor.ptr.get(), iremote_tensor_ptr.get(), stream); - } else { - iremote_tensor_ptr->copy_from(plugin_tensor.ptr); - } - } else { - copy_events.push_back(output_memory->copy_to(stream, 
*user_mem, false)); - } } else if (is_remote_tensor_impl && is_dynamic) { auto& stream = m_graph->get_network()->get_stream(); auto user_mem = remote_tensor_impl_ptr->get_original_memory(); diff --git a/src/plugins/intel_npu/src/plugin/npuw/llm_compiled_model.cpp b/src/plugins/intel_npu/src/plugin/npuw/llm_compiled_model.cpp index e18b098969eb79..260a1c444284cb 100644 --- a/src/plugins/intel_npu/src/plugin/npuw/llm_compiled_model.cpp +++ b/src/plugins/intel_npu/src/plugin/npuw/llm_compiled_model.cpp @@ -30,13 +30,13 @@ std::shared_ptr redirect_new_kv_to_output(const std::shared_ptr cvt_kvcache_to_fp16(const std::shared_ptr& model) { ov::preprocess::PrePostProcessor ppp(model); - for (auto tensor : model->inputs()) { + for (const auto& tensor : model->inputs()) { if (tensor.get_any_name().find("past_key") != std::string::npos) { ppp.input(tensor.get_any_name()).tensor().set_element_type(ov::element::Type_t::f16); } } - for (auto tensor : model->outputs()) { + for (const auto& tensor : model->outputs()) { if (tensor.get_any_name().find("present") != std::string::npos) { ppp.output(tensor.get_any_name()).tensor().set_element_type(ov::element::Type_t::f16); } @@ -55,7 +55,7 @@ void reshape_to_static(std::shared_ptr model, const uint32_t kvcache_size, const KVAxesPosition& kv_axes_position) { std::map new_shapes; - for (auto input : model->inputs()) { + for (const auto& input : model->inputs()) { const auto& input_name = input.get_any_name(); ov::PartialShape new_shape; if (input_name.find("input_ids") != std::string::npos) { @@ -275,7 +275,7 @@ ov::npuw::LLMCompiledModel::LLMCompiledModel(const std::shared_ptr& m auto npudesc = extract_npu_descriptor(plugin); - ov::AnyMap properties_copy = other_props; + ov::AnyMap properties_copy = std::move(other_props); auto prefill_config = get_default_prefill_config(model, npudesc); // NB: GENERATE_HINT is only applicable for default generate config! 
const ::intel_npu::npuw::llm::GenerateHint generate_hint = m_cfg.get<::intel_npu::NPUW_LLM_GENERATE_HINT>(); diff --git a/src/plugins/intel_npu/src/plugin/npuw/llm_infer_request.cpp b/src/plugins/intel_npu/src/plugin/npuw/llm_infer_request.cpp index a8c90884d3d926..a73478c0cab5d2 100644 --- a/src/plugins/intel_npu/src/plugin/npuw/llm_infer_request.cpp +++ b/src/plugins/intel_npu/src/plugin/npuw/llm_infer_request.cpp @@ -36,17 +36,17 @@ ov::npuw::LLMInferRequest::LLMInferRequest(const std::shared_ptrm_kvcache_compiled->create_infer_request(); m_prefill_request = compiled_model->m_prefill_compiled->create_infer_request(); - for (auto input_port : m_prefill_request->get_compiled_model()->inputs()) { + for (const auto& input_port : m_prefill_request->get_compiled_model()->inputs()) { m_prefill_in_ports.emplace(input_port.get_any_name(), input_port); } - for (auto output_port : m_prefill_request->get_compiled_model()->outputs()) { + for (const auto& output_port : m_prefill_request->get_compiled_model()->outputs()) { m_prefill_out_ports.emplace(output_port.get_any_name(), output_port); } - for (auto input_port : m_kvcache_request->get_compiled_model()->inputs()) { + for (const auto& input_port : m_kvcache_request->get_compiled_model()->inputs()) { m_kvcache_in_ports.emplace(input_port.get_any_name(), input_port); } - for (auto output_port : m_kvcache_request->get_compiled_model()->outputs()) { + for (const auto& output_port : m_kvcache_request->get_compiled_model()->outputs()) { m_kvcache_out_ports.emplace(output_port.get_any_name(), output_port); } } diff --git a/src/plugins/intel_npu/src/plugin/npuw/weights_bank.cpp b/src/plugins/intel_npu/src/plugin/npuw/weights_bank.cpp index 5ff064e7629759..ddc8f10ccf034e 100644 --- a/src/plugins/intel_npu/src/plugin/npuw/weights_bank.cpp +++ b/src/plugins/intel_npu/src/plugin/npuw/weights_bank.cpp @@ -132,10 +132,15 @@ ov::Tensor Bank::eval_and_alloc(const LazyTensor& tensor, bool Bank::is_remote(const LazyTensor& tensor) const { // FIXME: make generic + std::lock_guard guard(m_mutex); + auto npu_bank = m_device_banks.find("NPU"); - if (npu_bank != m_device_banks.end() && npu_bank->second.storage.find(tensor) != npu_bank->second.storage.end()) { - // Found in NPU bank so considered remote (utterly wrong for the generic case) - return true; + if (npu_bank != m_device_banks.end()) { + std::lock_guard dev_guard(npu_bank->second.mutex); + if (npu_bank->second.storage.find(tensor) != npu_bank->second.storage.end()) { + // Found in NPU bank so considered remote (utterly wrong for the generic case) + return true; + } } return false; } diff --git a/src/plugins/intel_npu/src/plugin/npuw/weights_bank.hpp b/src/plugins/intel_npu/src/plugin/npuw/weights_bank.hpp index 491e962a58b438..f2ca0436607fd4 100644 --- a/src/plugins/intel_npu/src/plugin/npuw/weights_bank.hpp +++ b/src/plugins/intel_npu/src/plugin/npuw/weights_bank.hpp @@ -38,13 +38,13 @@ class Bank { // Bank for specified device and their allocated memory struct DeviceBank { std::unordered_map storage; - std::mutex mutex; + mutable std::mutex mutex; }; std::unordered_map m_device_banks; ov::Tensor eval_and_alloc(const LazyTensor& tensor, DeviceBank& dbank, const std::string& device); - std::mutex m_mutex; + mutable std::mutex m_mutex; std::shared_ptr m_core = nullptr; std::string m_alloc_device; }; diff --git a/tests/requirements_tensorflow b/tests/requirements_tensorflow index 954bba7944245f..5369b0135f7618 100644 --- a/tests/requirements_tensorflow +++ b/tests/requirements_tensorflow @@ -1,8 +1,9 @@ # test 
ovc with NumPy 2.x on Ubuntu 24 with default Python 3.12 # test against NumPy 1.x with older Python versions # tensorflow-intel 2.18.0 depends on numpy<2.1.0 and >=1.26.0 -numpy==1.26.4; python_version < "3.12" -numpy==2.0.2; python_version >= "3.12" +# tensorflow 2.16.2 depends on numpy<2.0.0 and >=1.26.0; python_version >= "3.12" +numpy==1.26.4; python_version < "3.12" or platform_system == "Darwin" and platform_machine == "x86_64" +numpy==2.0.2; python_version >= "3.12" and (platform_system != "Darwin" or platform_machine != "x86_64") pytest==7.0.1 pytest-xdist[psutil]==3.6.1 pytest-html==4.1.1 @@ -16,7 +17,8 @@ wrapt==1.15.0; python_version >= "3.12" # tensorflow-text is not available for both Windows and ARM platforms tensorflow-text==2.18.0; python_version < "3.12" and platform_system == "Linux" and platform_machine == "x86_64" tensorflow-hub==0.16.1 -jax==0.4.35; platform_system != "Darwin" or platform_machine != "x86_64" +jax==0.4.35; (platform_system != "Darwin" or platform_machine != "x86_64") and python_version > "3.9" # tensorflow 2.16.2 depends on ml-dtypes~=0.3.1 and jax 0.4.35 depends on ml-dtypes>=0.4.0 -jax==0.4.33; platform_system == "Darwin" and platform_machine == "x86_64" +jax==0.4.33; (platform_system == "Darwin" and platform_machine == "x86_64") and python_version > "3.9" +jax==0.4.30; python_version <= "3.9" defusedxml==0.7.1
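Note on the intel_cpu shape-inference test changes above: most of the churn is the rename of the ShapeVector alias to StaticShapeVector in utils.hpp (plus the explicit ov::Shape qualification of shape literals); judging from the surrounding code both aliases name a plain vector of StaticShape, so the call sites change in spelling only. A tiny self-contained sketch of what the renamed alias amounts to, using a simplified stand-in for the real ov::intel_cpu::StaticShape type:

    #include <cstddef>
    #include <vector>

    // Stand-in for ov::intel_cpu::StaticShape (the real type wraps a small vector of dims).
    using StaticShape = std::vector<std::size_t>;
    // The renamed alias from utils.hpp: just a vector of static shapes.
    using StaticShapeVector = std::vector<StaticShape>;

    int main() {
        // data, begin, end, stride shapes, as the StridedSlice tests above build them
        StaticShapeVector input_shapes{{3, 2, 3}, {3}, {3}, {3}};
        return input_shapes.size() == 4 ? 0 : 1;
    }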
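Note on the npuw weights_bank changes above: Bank::is_remote() is a const query, so for it to take both the bank-level lock and the per-device-bank lock, the corresponding std::mutex members are declared mutable (a mutex is synchronization state, not part of the object's logical value). A minimal self-contained sketch of that locking idiom, with hypothetical stand-in types rather than the real Bank/DeviceBank/LazyTensor classes:

    #include <mutex>
    #include <string>
    #include <unordered_map>
    #include <unordered_set>

    class Bank {
    public:
        void put(const std::string& device, const std::string& key) {
            std::lock_guard<std::mutex> guard(m_mutex);                      // outer lock first
            std::lock_guard<std::mutex> dev_guard(m_device_banks[device].mutex);
            m_device_banks[device].storage.insert(key);
        }

        // Const query: it mutates nothing, but still has to lock; this only compiles
        // because the mutexes are declared mutable.
        bool is_remote(const std::string& key) const {
            std::lock_guard<std::mutex> guard(m_mutex);
            auto npu_bank = m_device_banks.find("NPU");
            if (npu_bank != m_device_banks.end()) {
                std::lock_guard<std::mutex> dev_guard(npu_bank->second.mutex);  // same outer-then-inner order as put()
                return npu_bank->second.storage.count(key) != 0;
            }
            return false;
        }

    private:
        struct DeviceBank {
            std::unordered_set<std::string> storage;
            mutable std::mutex mutex;   // mutable: lockable from const methods
        };
        std::unordered_map<std::string, DeviceBank> m_device_banks;
        mutable std::mutex m_mutex;     // protects the map of device banks
    };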