diff --git a/src/core/dev_api/openvino/core/type_util.hpp b/src/core/dev_api/openvino/core/type_util.hpp
new file mode 100644
index 00000000000000..45d62d286bf218
--- /dev/null
+++ b/src/core/dev_api/openvino/core/type_util.hpp
@@ -0,0 +1,17 @@
+// Copyright (C) 2018-2025 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "openvino/core/type.hpp"
+
+namespace ov {
+
+/// \brief Tests if value is a pointer/shared_ptr that can be statically cast to any of the specified types
+template <class T, class... Others, class Value>
+bool is_type_any_of(Value value) {
+    return is_type<T>(value) || (is_type_any_of<Others>(value) || ...);
+}
+
+}  // namespace ov
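For reviewers, a minimal usage sketch (not part of the diff; the function name `is_any_convolution` is illustrative). `is_type_any_of<T, Others...>` recurses over the parameter pack, and because a unary `||` fold over an empty pack yields `false`, it short-circuits exactly like the hand-written chain of `is_type` calls it replaces:

```cpp
// Conceptually, is_type_any_of<A, B, C>(op) expands to
//   is_type<A>(op) || (is_type_any_of<B>(op) || is_type_any_of<C>(op))
// where each single-type call reduces to is_type<X>(op) || false.
#include <memory>

#include "openvino/core/node.hpp"
#include "openvino/core/type_util.hpp"
#include "openvino/op/convolution.hpp"
#include "openvino/op/group_conv.hpp"

bool is_any_convolution(const std::shared_ptr<const ov::Node>& op) {
    // Equivalent to: is_type<Convolution>(op) || is_type<GroupConvolution>(op),
    // evaluated left to right with short-circuiting.
    return ov::is_type_any_of<ov::op::v1::Convolution, ov::op::v1::GroupConvolution>(op);
}
```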
diff --git a/src/plugins/intel_cpu/src/emitters/snippets/aarch64/jit_kernel_emitter.cpp b/src/plugins/intel_cpu/src/emitters/snippets/aarch64/jit_kernel_emitter.cpp
index 246705f6bc22bb..8e5297ac8f42c4 100644
--- a/src/plugins/intel_cpu/src/emitters/snippets/aarch64/jit_kernel_emitter.cpp
+++ b/src/plugins/intel_cpu/src/emitters/snippets/aarch64/jit_kernel_emitter.cpp
@@ -172,7 +172,7 @@ void jit_kernel_emitter::emit_impl(const std::vector<size_t>& in, const std::vec
         auto expected_out_type = snippets::RegType::undefined;
         const auto& node = expression->get_node();
         // Note: currently only a few operations are allowed to have mixed in/out register types => skip validation here
-        if (!ov::is_type<snippets::op::LoopEnd>(node) && !ov::is_type<snippets::op::RegSpillBase>(node) &&
+        if (!ov::is_type_any_of<snippets::op::LoopEnd, snippets::op::RegSpillBase>(node) &&
             !std::dynamic_pointer_cast<jit_nop_emitter>(emitter)) {
             std::tie(expected_in_type, expected_out_type) = get_expected_reg_types(emitter);
         }
diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_kernel_emitter.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_kernel_emitter.cpp
index 49f6826e81433d..b43cb12c05f722 100644
--- a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_kernel_emitter.cpp
+++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_kernel_emitter.cpp
@@ -5,6 +5,7 @@
 #include "jit_kernel_emitter.hpp"
 
 #include "jit_snippets_emitters.hpp"
+#include "openvino/core/type_util.hpp"
 #include "snippets/utils/reg_utils.hpp"
 #include "utils.hpp"
 
@@ -125,7 +126,7 @@ void jit_kernel_emitter::emit_impl(const std::vector<size_t>& in, const std::vec
         const auto& node = expression->get_node();
         // Note: A few operations are allowed to have mixed register types on their inputs (or outputs) => skip
         // validation here
-        if (!ov::is_type<snippets::op::LoopEnd>(node) && !ov::is_type<snippets::op::RegSpillBase>(node) &&
+        if (!ov::is_type_any_of<snippets::op::LoopEnd, snippets::op::RegSpillBase>(node) &&
             !std::dynamic_pointer_cast<jit_nop_emitter>(emitter)) {
             std::tie(expected_in_type, expected_out_type) = get_expected_reg_types(emitter);
         }
diff --git a/src/plugins/intel_cpu/src/node.h b/src/plugins/intel_cpu/src/node.h
index 0acbe387f1c464..1098cef65be775 100644
--- a/src/plugins/intel_cpu/src/node.h
+++ b/src/plugins/intel_cpu/src/node.h
@@ -29,6 +29,7 @@
 #include "onednn/iml_type_mapper.h"
 #include "openvino/cc/factory.h"
 #include "openvino/core/node.hpp"
+#include "openvino/core/type_util.hpp"
 #include "perf_count.h"
 #include "selective_build.h"
 #include "utils/bit_util.hpp"
diff --git a/src/plugins/intel_cpu/src/nodes/conv.cpp b/src/plugins/intel_cpu/src/nodes/conv.cpp
index c0721734d0a27e..f433e9e712dede 100644
--- a/src/plugins/intel_cpu/src/nodes/conv.cpp
+++ b/src/plugins/intel_cpu/src/nodes/conv.cpp
@@ -226,7 +226,7 @@ class Convolution::FusedSubgraph {
 bool Convolution::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
     try {
-        if (!ov::is_type<ov::op::v1::Convolution>(op) && !ov::is_type<ov::op::v1::GroupConvolution>(op)) {
+        if (!ov::is_type_any_of<ov::op::v1::Convolution, ov::op::v1::GroupConvolution>(op)) {
             errorMessage = "Only opset1 Convolution and GroupConvolution operations are supported";
             return false;
         }
diff --git a/src/plugins/intel_cpu/src/nodes/dft.cpp b/src/plugins/intel_cpu/src/nodes/dft.cpp
index 8c535d1e300005..95b899199a84c7 100644
--- a/src/plugins/intel_cpu/src/nodes/dft.cpp
+++ b/src/plugins/intel_cpu/src/nodes/dft.cpp
@@ -27,10 +27,7 @@ bool DFT::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::s
         errorMessage = "Doesn't support op with dynamic shapes";
         return false;
     }
-    const auto interpDFT = ov::is_type<const op::v7::DFT>(op);
-    const auto interpIDFT = ov::is_type<const op::v7::IDFT>(op);
-
-    if (!interpDFT && !interpIDFT) {
+    if (!ov::is_type_any_of<const op::v7::DFT, const op::v7::IDFT>(op)) {
         errorMessage = "Only opset7 DFT/IDFT operation is supported";
         return false;
     }
diff --git a/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp b/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp
index 0b2eda4dfaada5..b3e23ee5e2927e 100644
--- a/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp
+++ b/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp
@@ -78,9 +78,7 @@ ov::element::TypeVector FullyConnected::getSupportedCompressedActivationsTypes()
 bool FullyConnected::isSupportedOperation(const std::shared_ptr<const ov::Node>& op,
                                           std::string& errorMessage) noexcept {
     try {
-        if (!ov::is_type<const ov::op::internal::FullyConnected>(op) &&
-            !ov::is_type<const ov::op::internal::FullyConnectedCompressed>(op) &&
-            !ov::is_type<const ov::op::internal::FullyConnectedQuantizedLegacy>(op)) {
+        if (!ov::is_type<const ov::op::internal::FullyConnected>(op)) {
            return false;
        }
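A note on the fullyconnected.cpp hunk above: `ov::is_type` matches through the runtime type-info hierarchy (a castability check, not exact type equality), so a single check against the base `FullyConnected` type presumably also covers the compressed/quantized variants that the removed lines tested individually. That is why this call site collapses to a plain `is_type` while the others need `is_type_any_of`. A sketch of the idea, assuming that derivation holds:

```cpp
// Sketch only: FullyConnectedCompressed / FullyConnectedQuantizedLegacy are
// assumed to derive from FullyConnected, as the removed lines suggest.
#include <memory>

#include "openvino/core/type.hpp"
#include "ov_ops/fully_connected.hpp"

bool accepts_fc_family(const std::shared_ptr<const ov::Node>& op) {
    // is_type<Base> returns true for the base op and for any op whose
    // type info reports it is castable to the base, i.e. derived variants.
    return ov::is_type<ov::op::internal::FullyConnected>(op);
}
```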
diff --git a/src/plugins/intel_cpu/src/nodes/pooling.cpp b/src/plugins/intel_cpu/src/nodes/pooling.cpp
index e6685536b33ad8..8bc1a23f08d169 100644
--- a/src/plugins/intel_cpu/src/nodes/pooling.cpp
+++ b/src/plugins/intel_cpu/src/nodes/pooling.cpp
@@ -145,19 +145,21 @@ dnnl::pooling_forward::primitive_desc createDescriptorHelper(const dnnl::engine&
 bool Pooling::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
     try {
-        if (ov::is_type<const ov::op::v8::MaxPool>(op) || ov::is_type<const ov::op::v14::MaxPool>(op)) {
+        if (ov::is_type_any_of<const ov::op::v8::MaxPool, const ov::op::v14::MaxPool>(op)) {
             if (!op->get_output_target_inputs(1).empty()) {
                 errorMessage = "MaxPool from opset8 and opset14 is supported only with one output";
                 return false;
             }
-        } else if (!ov::is_type<const ov::op::v1::MaxPool>(op) && !ov::is_type<const ov::op::v1::AvgPool>(op) &&
-                   !ov::is_type<const ov::op::v8::MaxPool>(op) && !ov::is_type<const ov::op::v14::MaxPool>(op) &&
-                   !ov::is_type<const ov::op::v14::AvgPool>(op)) {
+        } else if (!ov::is_type_any_of<const ov::op::v1::MaxPool,
+                                       const ov::op::v1::AvgPool,
+                                       const ov::op::v8::MaxPool,
+                                       const ov::op::v14::MaxPool,
+                                       const ov::op::v14::AvgPool>(op)) {
             errorMessage = "Supported ops are MaxPool-1, MaxPool-8, MaxPool-14, AvgPool-1 and AvgPool-14";
             return false;
         }
 #if defined(OV_CPU_WITH_ACL)
-        if (ov::as_type_ptr<const ov::op::v8::MaxPool>(op) || ov::as_type_ptr<const ov::op::v14::MaxPool>(op)) {
+        if (ov::is_type_any_of<const ov::op::v8::MaxPool, const ov::op::v14::MaxPool>(op)) {
             if (ov::as_type_ptr<const ov::op::util::MaxPoolBase>(op)->get_kernel() != ov::Shape(2, 2)) {
                 errorMessage =
                     "Pooling indices returning source tensor coordinates is only supported for pool size 2x2";
diff --git a/src/plugins/intel_cpu/src/nodes/strided_slice.cpp b/src/plugins/intel_cpu/src/nodes/strided_slice.cpp
index b9b5dce5bb2c14..c5bab653eef2d8 100644
--- a/src/plugins/intel_cpu/src/nodes/strided_slice.cpp
+++ b/src/plugins/intel_cpu/src/nodes/strided_slice.cpp
@@ -20,8 +20,7 @@ namespace ov::intel_cpu::node {
 bool StridedSlice::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
     try {
-        if (!ov::is_type<ov::op::v1::StridedSlice>(op) && !ov::is_type<ov::op::v8::Slice>(op) &&
-            !ov::is_type<ov::op::v15::SliceScatter>(op)) {
+        if (!ov::is_type_any_of<ov::op::v1::StridedSlice, ov::op::v8::Slice, ov::op::v15::SliceScatter>(op)) {
             errorMessage = "Only StridedSlice from opset1, Slice from opset8 and SliceScatter from opset15 operations "
                            "are supported.";
             return false;
diff --git a/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp b/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp
index a47f48395fa9de..bf2df0893c058d 100644
--- a/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp
+++ b/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp
@@ -623,9 +623,9 @@ std::shared_ptr<IStaticShapeInfer> make_shape_inference(std::shared_ptr<ov::Node
     } else if (ov::is_type<op::util::UnaryElementwiseArithmetic>(op)) {
         return std::make_shared<ShapeInferCopy>(std::move(op));
-    } else if (ov::is_type<op::util::BinaryElementwiseArithmetic>(op) ||
-               ov::is_type<op::util::BinaryElementwiseComparison>(op) ||
-               ov::is_type<op::util::BinaryElementwiseLogical>(op)) {
+    } else if (ov::is_type_any_of<op::util::BinaryElementwiseArithmetic,
+                                  op::util::BinaryElementwiseComparison,
+                                  op::util::BinaryElementwiseLogical>(op)) {
         return std::make_shared<ShapeInferCopy>(std::move(op));
     } else {
         return std::make_shared<ShapeInferFallback>(std::move(op));
diff --git a/src/plugins/intel_cpu/src/shape_inference/shape_inference.hpp b/src/plugins/intel_cpu/src/shape_inference/shape_inference.hpp
index 198119578aecd0..317fa87b01bf9b 100644
--- a/src/plugins/intel_cpu/src/shape_inference/shape_inference.hpp
+++ b/src/plugins/intel_cpu/src/shape_inference/shape_inference.hpp
@@ -8,6 +8,7 @@
 
 #include "openvino/core/core.hpp"
 #include "openvino/core/node.hpp"
+#include "openvino/core/type_util.hpp"
 #include "shape_inference/shape_inference_cpu.hpp"
 #include "shape_inference/static_shape.hpp"
 #include "tensor_data_accessor.hpp"
diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/stateful_sdpa_fusion.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/stateful_sdpa_fusion.cpp
index 23f7710eea0b27..c7d24105631193 100644
--- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/stateful_sdpa_fusion.cpp
+++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/stateful_sdpa_fusion.cpp
@@ -177,8 +177,7 @@ StatefulSDPAFusion::StatefulSDPAFusion() {
         // the second one leads to Assign, and this is checked later
         // the third child is allowed to be a ShapeOf op only, thus one of them must be ShapeOf
         if (!std::any_of(children.begin(), children.end(), [](const ov::Input<ov::Node>& child) {
-                return ov::is_type<ov::op::v0::ShapeOf>(child.get_node()) ||
-                       ov::is_type<ov::op::v3::ShapeOf>(child.get_node());
+                return ov::is_type<ov::op::util::ShapeOfBase>(child.get_node());
             })) {
             return false;
         }
diff --git a/src/plugins/intel_cpu/src/transformations/snippets/aarch64/pass/snippets_mark_skipped.cpp b/src/plugins/intel_cpu/src/transformations/snippets/aarch64/pass/snippets_mark_skipped.cpp
index 2e89031a8ac354..2acc471ac175ed 100644
--- a/src/plugins/intel_cpu/src/transformations/snippets/aarch64/pass/snippets_mark_skipped.cpp
+++ b/src/plugins/intel_cpu/src/transformations/snippets/aarch64/pass/snippets_mark_skipped.cpp
@@ -4,6 +4,7 @@
 #include "snippets_mark_skipped.hpp"
 
 #include "itt.hpp"
+#include "openvino/core/type_util.hpp"
 #include "snippets/op/subgraph.hpp"
 #include "snippets/pass/tokenization.hpp"
 #include "snippets/utils/utils.hpp"
@@ -72,14 +73,16 @@ bool isFullyConnected(const std::shared_ptr<const ov::Node>& node) {
 bool SupportsFusingWithConvolution_Simple(const std::shared_ptr<const Node>& node) {
     // Note: some other operations support this fusing (SoftPlus, Sqrt).
     // Skip them here, when they are supported by Snippets ARM. Ticket: 141170.
-    return ov::is_type<ov::op::v0::Abs>(node) || ov::is_type<ov::op::v0::Clamp>(node) ||
-           ov::is_type<ov::op::v0::Elu>(node) || ov::is_type<ov::op::v0::Relu>(node) ||
-           ov::is_type<ov::op::v0::Sigmoid>(node) || ov::is_type<ov::op::v0::Tanh>(node);
+    return ov::is_type_any_of<ov::op::v0::Abs,
+                              ov::op::v0::Clamp,
+                              ov::op::v0::Elu,
+                              ov::op::v0::Relu,
+                              ov::op::v0::Sigmoid,
+                              ov::op::v0::Tanh>(node);
 }
 // Convolution is a special case, since it supports peculiar fusings
 bool isSuitableConvolutionParent(const std::shared_ptr<const Node>& node) {
-    const bool is_suitable_node =
-        ov::is_type<ov::op::v1::Convolution>(node) || ov::is_type<ov::op::v1::GroupConvolution>(node);
+    const bool is_suitable_node = ov::is_type_any_of<ov::op::v1::Convolution, ov::op::v1::GroupConvolution>(node);
     // has a single output, connected to a single child
     const auto out = node->outputs();
     const bool has_only_child = (out.size() == 1) && (out[0].get_target_inputs().size() == 1);
@@ -93,9 +96,8 @@ bool isSuitableBinaryConvolutionParent(const std::shared_ptr<const Node>& node)
     return is_suitable_node && has_only_child;
 }
 bool isSuitableMiscParent(const std::shared_ptr<const Node>& node) {
-    const bool is_suitable_node = ov::is_type<ov::op::v0::MVN>(node) ||
-                                  ov::is_type<ov::op::v0::NormalizeL2>(node) ||
-                                  ov::is_type<ov::op::v1::AvgPool>(node);
+    const bool is_suitable_node =
+        ov::is_type_any_of<ov::op::v0::MVN, ov::op::v0::NormalizeL2, ov::op::v1::AvgPool>(node);
     // has a single output, connected to a single child
     const auto out = node->outputs();
     const bool has_only_child = (out.size() == 1) && (out[0].get_target_inputs().size() == 1);
@@ -126,8 +128,7 @@ bool isSuitableChildForFusingBias(const std::shared_ptr<const Node>& node, int f
     }
 
     auto is_suitable_parent = [](const std::shared_ptr<const Node>& node) {
-        return (ov::is_type<ov::op::v1::Convolution>(node) || ov::is_type<ov::op::v1::GroupConvolution>(node) ||
-                ov::is_type<ov::op::v0::MatMul>(node));
+        return (ov::is_type_any_of<ov::op::v1::Convolution, ov::op::v1::GroupConvolution, ov::op::v0::MatMul>(node));
     };
 
     for (const auto& in : node->inputs()) {
@@ -221,8 +222,7 @@ bool isSuitableConvert(const std::shared_ptr<const Node>& node) {
 }
 
 auto is_skipped_op(const std::shared_ptr<ov::Node>& op) -> bool {
-    return ov::is_type<ov::op::v0::Constant>(op) || ov::is_type<ov::op::v0::Parameter>(op) ||
-           ov::is_type<ov::op::v0::Result>(op);
+    return ov::is_type_any_of<ov::op::v0::Constant, ov::op::v0::Parameter, ov::op::v0::Result>(op);
 }
 
 bool isSuitableMatMulWithConstantPath(const std::shared_ptr<Node>& node) {
diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/snippets_mark_skipped.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/snippets_mark_skipped.cpp
index 7269e0d7a62b95..adf837b08909be 100644
--- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/snippets_mark_skipped.cpp
+++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/snippets_mark_skipped.cpp
@@ -5,6 +5,7 @@
 #include "cpu/x64/cpu_isa_traits.hpp"
 #include "itt.hpp"
+#include "openvino/core/type_util.hpp"
 #include "snippets/op/subgraph.hpp"
 #include "snippets/pass/tokenization.hpp"
 #include "snippets/utils/utils.hpp"
@@ -72,11 +73,16 @@ bool isFullyConnected(const std::shared_ptr<const ov::Node>& node) {
 bool SupportsFusingWithConvolution_SumActivation(const std::shared_ptr<const Node>& node) {
     // todo: Do all PReLUs are fused? Not sure about round and softRelu
     // EltwiseRoundHalfToEven, EltwiseRoundHalfAwayFromZero, EltwiseSoftRelu
-    return ov::is_type<ov::op::v0::Relu>(node) || ov::is_type<ov::op::v0::PRelu>(node) ||
-           ov::is_type<ov::op::v0::Elu>(node) || ov::is_type<ov::op::v0::Sigmoid>(node) ||
-           ov::is_type<ov::op::v4::HSwish>(node) || ov::is_type<ov::op::v4::Mish>(node) ||
-           ov::is_type<ov::op::v5::HSigmoid>(node) || ov::is_type<ov::op::v5::Round>(node) ||
-           ov::is_type<ov::op::v0::Gelu>(node) || ov::is_type<ov::op::v7::Gelu>(node);
+    return ov::is_type_any_of<ov::op::v0::Relu,
+                              ov::op::v0::PRelu,
+                              ov::op::v0::Elu,
+                              ov::op::v0::Sigmoid,
+                              ov::op::v4::HSwish,
+                              ov::op::v4::Mish,
+                              ov::op::v5::HSigmoid,
+                              ov::op::v5::Round,
+                              ov::op::v0::Gelu,
+                              ov::op::v7::Gelu>(node);
 }
 
 bool canBePerformedAsScaleShift(const std::shared_ptr<const Node>& node, const int channelAxis) {
@@ -120,8 +126,7 @@ bool canBePerformedAsScaleShift(const std::shared_ptr<const Node>& node, const i
     // Prelu and MulAdd are still ignored
     // isConvertablePowerStatic() is ignored
-    return (ov::is_type<ov::op::v1::Add>(node) || ov::is_type<ov::op::v1::Multiply>(node) ||
-            ov::is_type<ov::op::v1::Subtract>(node) || ov::is_type<ov::op::v1::Divide>(node)) &&
+    return ov::is_type_any_of<ov::op::v1::Add, ov::op::v1::Multiply, ov::op::v1::Subtract, ov::op::v1::Divide>(node) &&
           isBroadcastableToDataInput();
 }
 
@@ -131,15 +136,18 @@ inline bool canBeMatMulExecutedInInt8(const ov::element::Type& firstType, const
 bool SupportsFusingWithConvolution_Simple(const std::shared_ptr<const Node>& node,
                                           const int channelAxis = DEFAULT_AXIS) {
-    return SupportsFusingWithConvolution_SumActivation(node) || ov::is_type<ov::op::v0::Tanh>(node) ||
-           ov::is_type<ov::op::v0::Gelu>(node) || ov::is_type<ov::op::v0::Abs>(node) ||
-           ov::is_type<ov::op::v0::Sqrt>(node) || ov::is_type<ov::op::v4::SoftPlus>(node) ||
-           ov::is_type<ov::op::v0::FakeQuantize>(node) || canBePerformedAsScaleShift(node, channelAxis);
+    return SupportsFusingWithConvolution_SumActivation(node) ||
+           ov::is_type_any_of<ov::op::v0::Tanh,
+                              ov::op::v0::Gelu,
+                              ov::op::v0::Abs,
+                              ov::op::v0::Sqrt,
+                              ov::op::v4::SoftPlus,
+                              ov::op::v0::FakeQuantize>(node) ||
+           canBePerformedAsScaleShift(node, channelAxis);
 }
 
 // Convolution is a special case, since it supports peculiar fusings
 bool isSuitableConvolutionParent(const std::shared_ptr<const Node>& node) {
-    const bool is_suitable_node =
-        ov::is_type<ov::op::v1::Convolution>(node) || ov::is_type<ov::op::v1::GroupConvolution>(node);
+    const bool is_suitable_node = ov::is_type_any_of<ov::op::v1::Convolution, ov::op::v1::GroupConvolution>(node);
     // has a single output, connected to a single child
     const auto out = node->outputs();
     const bool has_only_child = (out.size() == 1) && (out[0].get_target_inputs().size() == 1);
@@ -168,14 +176,18 @@ int getChannelAxis(const ov::AxisSet& axes, bool keep_dims) {
     return channelAxis;
 }
 bool isSuitableMiscParent(const std::shared_ptr<const Node>& node) {
-    const bool is_suitable_node =
-        ov::is_type<ov::op::v0::MVN>(node) || ov::is_type<ov::op::v6::MVN>(node) ||
-        ov::is_type<ov::op::v0::NormalizeL2>(node) || ov::is_type<ov::op::v0::Interpolate>(node) ||
-        ov::is_type<ov::op::v4::Interpolate>(node) || ov::is_type<ov::op::v0::LSTMCell>(node) ||
-        ov::is_type<ov::op::v4::LSTMCell>(node) || ov::is_type<ov::op::v1::ConvolutionBackpropData>(node) ||
-        ov::is_type<ov::op::util::ArithmeticReductionKeepDims>(node) ||
-        ov::is_type<ov::op::v1::GroupConvolutionBackpropData>(node) || ov::is_type<ov::op::v1::AvgPool>(node) ||
-        ov::is_type<ov::op::v14::AvgPool>(node);
+    const bool is_suitable_node = ov::is_type_any_of<ov::op::v0::MVN,
+                                                     ov::op::v6::MVN,
+                                                     ov::op::v0::NormalizeL2,
+                                                     ov::op::v0::Interpolate,
+                                                     ov::op::v4::Interpolate,
+                                                     ov::op::v0::LSTMCell,
+                                                     ov::op::v4::LSTMCell,
+                                                     ov::op::v1::ConvolutionBackpropData,
+                                                     ov::op::util::ArithmeticReductionKeepDims,
+                                                     ov::op::v1::GroupConvolutionBackpropData,
+                                                     ov::op::v1::AvgPool,
+                                                     ov::op::v14::AvgPool>(node);
     // has a single output, connected to a single child
     const auto out = node->outputs();
     const bool has_only_child = (out.size() == 1) && (out[0].get_target_inputs().size() == 1);
@@ -307,9 +319,11 @@ bool isSuitableChildForFusingMatMul(const std::shared_ptr<const Node>& node,
     // MatMul specific checks from ::canFuse()
     if (one_of(updatedChainType, NodeFusingType::FusedWithMatMul, NodeFusingType::FusedWithMatMulI8)) {
-        const auto is_binary_eltwise = ov::is_type<ov::op::v1::Add>(node) || ov::is_type<ov::op::v1::Multiply>(node) ||
-                                       ov::is_type<ov::op::v1::Subtract>(node) ||
-                                       ov::is_type<ov::op::v1::Divide>(node) || ov::is_type<ov::op::v0::PRelu>(node);
+        const auto is_binary_eltwise = ov::is_type_any_of<ov::op::v1::Add,
+                                                          ov::op::v1::Multiply,
+                                                          ov::op::v1::Subtract,
+                                                          ov::op::v1::Divide,
+                                                          ov::op::v0::PRelu>(node);
         const auto rank = node->get_output_partial_shape(0).rank();
         if (dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core) && rank.is_static() && is_binary_eltwise) {
             const auto const1 = ov::is_type<ov::op::v0::Constant>(node->get_input_node_shared_ptr(0));
@@ -490,8 +504,7 @@ bool isSuitableConvert(const std::shared_ptr<const Node>& node) {
 }
 
 auto is_skipped_op(const std::shared_ptr<ov::Node>& op) -> bool {
-    return ov::is_type<ov::op::v0::Constant>(op) || ov::is_type<ov::op::v0::Parameter>(op) ||
-           ov::is_type<ov::op::v0::Result>(op);
+    return ov::is_type_any_of<ov::op::v0::Constant, ov::op::v0::Parameter, ov::op::v0::Result>(op);
 }
 
 }  // namespace
diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/eltwise.cpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/eltwise.cpp
index c1304ba2477b35..0c6d5be82f2559 100644
--- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/eltwise.cpp
+++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/eltwise.cpp
@@ -26,8 +26,7 @@ namespace ov::intel_cpu::tpp::op {
 #define UNARY_AUX_METHODS(UNARY_OP) GENERAL_AUX_METHODS(UNARY_OP, UnaryEltwiseTPP, new_args.at(0))
 
 bool EltwiseTPP::is_supported(const std::shared_ptr<ov::Node>& node) {
-    return ov::is_type<ov::op::v1::Add>(node) || ov::is_type<ov::op::v1::Subtract>(node) ||
-           ov::is_type<ov::op::v1::Multiply>(node) || ov::is_type<ov::op::v1::Divide>(node);
+    return ov::is_type_any_of<ov::op::v1::Add, ov::op::v1::Subtract, ov::op::v1::Multiply, ov::op::v1::Divide>(node);
 }
 
 bool EltwiseTPP::visit_attributes(AttributeVisitor& visitor) {
diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/eltwise.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/eltwise.hpp
index 2aa001c23e36e9..68e1b5b75913da 100644
--- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/eltwise.hpp
+++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/eltwise.hpp
@@ -6,6 +6,7 @@
 
 #include "descriptor.hpp"
 #include "modifiers.hpp"
+#include "openvino/core/type_util.hpp"
 #include "openvino/op/add.hpp"
 #include "openvino/op/divide.hpp"
 #include "openvino/op/exp.hpp"
diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/fuse_tpp_to_equations.cpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/fuse_tpp_to_equations.cpp
index aea816d03c0f64..3214142bf6ce27 100644
--- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/fuse_tpp_to_equations.cpp
+++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/fuse_tpp_to_equations.cpp
@@ -27,10 +27,8 @@ bool FuseTPPToEquations::fuse_from_root(const NodePtr& root, const std::shared_p
     auto get_tpp_op = [](const NodePtr& n) {
         auto tpp = std::dynamic_pointer_cast<op::EltwiseTPP>(n);
         bool not_supported_op =
-            // ticket: 152532
-            ov::is_type<op::Reduce>(n) ||
-            // ticket: 152510
-            ov::is_type<op::Scalar>(n);
+            // tickets: 152532, 152510
+            ov::is_type_any_of<op::Reduce, op::Scalar>(n);
         return not_supported_op ? nullptr : tpp;
     };
diff --git a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp
index 5d095a4c80119b..754a1d55cc2c54 100644
--- a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp
+++ b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp
@@ -244,7 +244,7 @@ bool Transformations::fuse_type_to_fq(const std::shared_ptr<ov::Node>& node, con
     auto consumers = node->output(0).get_target_inputs();
     for (auto& input : consumers) {
         const auto consumer = input.get_node();
-        if (ov::is_type<ov::op::v0::Result>(consumer) || ov::is_type<ov::op::v0::Convert>(consumer)) {
+        if (ov::is_type_any_of<ov::op::v0::Result, ov::op::v0::Convert>(consumer)) {
            continue;
        }
        auto convert_after = std::make_shared<ov::op::v0::Convert>(node, to);
@@ -1131,22 +1131,37 @@ void Transformations::MainSnippets(void) {
     auto is_supported_op = [](const std::shared_ptr<const ov::Node>& n) -> bool {
 #if defined(OPENVINO_ARCH_ARM64)
-        return (ov::is_type<ov::op::v0::Abs>(n) || ov::is_type<ov::op::v1::Add>(n) ||
-                ov::is_type<ov::op::v0::Clamp>(n) || ov::is_type<ov::op::v0::Ceiling>(n) ||
-                ov::is_type<ov::op::v0::Convert>(n) || ov::is_type<ov::op::v1::Divide>(n) ||
-                ov::is_type<ov::op::v0::Elu>(n) || ov::is_type<ov::op::v1::Equal>(n) ||
-                ov::is_type<ov::op::v0::Exp>(n) || ov::is_type<ov::op::v0::FakeQuantize>(n) ||
-                ov::is_type<ov::op::v0::Floor>(n) || ov::is_type<ov::op::v1::FloorMod>(n) ||
-                ov::is_type<ov::op::v0::Gelu>(n) || ov::is_type<ov::op::v7::Gelu>(n) ||
-                ov::is_type<ov::op::v1::Greater>(n) || ov::is_type<ov::op::v1::GreaterEqual>(n) ||
-                ov::is_type<ov::op::v4::HSwish>(n) || ov::is_type<ov::op::v1::LessEqual>(n) ||
-                ov::is_type<ov::op::v1::Maximum>(n) || ov::is_type<ov::op::v1::Minimum>(n) ||
-                ov::is_type<ov::op::v4::Mish>(n) || ov::is_type<ov::op::v1::Mod>(n) ||
-                ov::is_type<ov::op::v1::Multiply>(n) || ov::is_type<ov::op::v0::PRelu>(n) ||
-                ov::is_type<ov::op::v0::Relu>(n) || ov::is_type<ov::op::v5::Round>(n) ||
-                ov::is_type<ov::op::v1::Select>(n) || ov::is_type<ov::op::v0::Sigmoid>(n) ||
-                ov::is_type<ov::op::v0::Sqrt>(n) || ov::is_type<ov::op::v1::Subtract>(n) ||
-                ov::is_type<ov::op::v0::Tanh>(n));
+        return (ov::is_type_any_of<ov::op::v0::Abs,
+                                   ov::op::v1::Add,
+                                   ov::op::v0::Clamp,
+                                   ov::op::v0::Ceiling,
+                                   ov::op::v0::Convert,
+                                   ov::op::v1::Divide,
+                                   ov::op::v0::Elu,
+                                   ov::op::v1::Equal,
+                                   ov::op::v0::Exp,
+                                   ov::op::v0::FakeQuantize,
+                                   ov::op::v0::Floor,
+                                   ov::op::v1::FloorMod,
+                                   ov::op::v0::Gelu,
+                                   ov::op::v7::Gelu,
+                                   ov::op::v1::Greater,
+                                   ov::op::v1::GreaterEqual,
+                                   ov::op::v4::HSwish,
+                                   ov::op::v1::LessEqual,
+                                   ov::op::v1::Maximum,
+                                   ov::op::v1::Minimum,
+                                   ov::op::v4::Mish,
+                                   ov::op::v1::Mod,
+                                   ov::op::v1::Multiply,
+                                   ov::op::v0::PRelu,
+                                   ov::op::v0::Relu,
+                                   ov::op::v5::Round,
+                                   ov::op::v1::Select,
+                                   ov::op::v0::Sigmoid,
+                                   ov::op::v0::Sqrt,
+                                   ov::op::v1::Subtract,
+                                   ov::op::v0::Tanh>(n));
 #else
         // CPU Plugin support Swish in Subgraph via conversion to SwichCPU which assumes second input to be constant,
         // and CPU Plugin does not support Mish for x64
@@ -1158,10 +1173,14 @@ void Transformations::MainSnippets(void) {
         // todo: general tokenization flow is not currently supported for these operations.
         // they can be tokenized only as a part of complex patterns
         auto is_unsupported_by_common_tokenization = [](const std::shared_ptr<const ov::Node>& n) {
-            return (ov::is_type<ov::op::v0::FakeQuantize>(n) || ov::is_type<ov::op::v0::MatMul>(n) ||
-                    ov::is_type<ov::op::v1::Transpose>(n) || ov::is_type<ov::op::v1::Softmax>(n) ||
-                    ov::is_type<ov::op::v8::Softmax>(n) || ov::is_type<ov::op::v1::Reshape>(n) ||
-                    ov::is_type<ov::op::v1::Broadcast>(n) || ov::is_type<ov::op::v3::Broadcast>(n));
+            return (ov::is_type_any_of<ov::op::v0::FakeQuantize,
+                                       ov::op::v0::MatMul,
+                                       ov::op::v1::Transpose,
+                                       ov::op::v1::Softmax,
+                                       ov::op::v8::Softmax,
+                                       ov::op::v1::Reshape,
+                                       ov::op::v1::Broadcast,
+                                       ov::op::v3::Broadcast>(n));
         };
         return !is_unsupported(n) && !is_unsupported_by_common_tokenization(n);
 #endif
@@ -1189,8 +1208,10 @@ void Transformations::MainSnippets(void) {
         return supported_element_types.count(t.get_element_type()) != 0 ||
                (is_input && t.get_element_type() == ov::element::i32 &&
-                (ov::is_type<const ov::op::v1::Transpose>(n) || ov::is_type<const ov::op::v1::Broadcast>(n) ||
-                 ov::is_type<const ov::op::v1::ReduceMax>(n) || ov::is_type<const ov::op::v1::ReduceSum>(n)));
+                (ov::is_type_any_of<const ov::op::v1::Transpose,
+                                    const ov::op::v1::Broadcast,
+                                    const ov::op::v1::ReduceMax,
+                                    const ov::op::v1::ReduceSum>(n)));
     };
     const auto& inputs = n->inputs();
diff --git a/src/plugins/intel_cpu/src/transformations/utils.cpp b/src/plugins/intel_cpu/src/transformations/utils.cpp
index eb1c15077a98f0..b3def025efc845 100644
--- a/src/plugins/intel_cpu/src/transformations/utils.cpp
+++ b/src/plugins/intel_cpu/src/transformations/utils.cpp
@@ -4,6 +4,7 @@
 
 #include "utils.hpp"
 
+#include "openvino/core/type_util.hpp"
 #include "openvino/opsets/opset1.hpp"
 #include "ov_ops/fully_connected.hpp"
 #include "transformations/rt_info/dequantization_node.hpp"
@@ -22,7 +23,7 @@ bool has_matmul_with_compressed_weights(const std::shared_ptr<const ov::Model>&
     };
 
     for (const auto& op : model->get_ops()) {
-        if (!ov::is_type<ov::opset1::MatMul>(op) && !ov::is_type<ov::op::internal::FullyConnected>(op)) {
+        if (!ov::is_type_any_of<ov::opset1::MatMul, ov::op::internal::FullyConnected>(op)) {
            continue;
        }
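Finally, a degenerate-case sketch (illustrative, built only on the dev-API header introduced above): with a single template argument the `Others` pack is empty, the `||` fold collapses to `false`, and `is_type_any_of<T>` behaves exactly like `is_type<T>`, so call sites can be migrated uniformly regardless of how many types they check:

```cpp
#include <memory>

#include "openvino/core/node.hpp"
#include "openvino/core/type_util.hpp"
#include "openvino/op/relu.hpp"

bool is_relu(const std::shared_ptr<const ov::Node>& op) {
    // Single-type call: expands to is_type<Relu>(op) || false,
    // i.e. identical to the plain ov::is_type<ov::op::v0::Relu>(op).
    return ov::is_type_any_of<ov::op::v0::Relu>(op);
}
```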