[CPU] Introduce ov::is_type_any_of for chaining ov::is_type calls #29102

Open · wants to merge 5 commits into master
17 changes: 17 additions & 0 deletions src/core/dev_api/openvino/core/type_util.hpp
@ilya-lavrenov (Contributor) commented on Feb 22, 2025:

do we need a special header for a single function?

I suppose the existing openvino/core/type.hpp is a good home for this function.

UPD: I saw that it was recommended to put it into the Dev API, but IMO this function is harmless in the public API. It's just a useful util.

@@ -0,0 +1,17 @@
// Copyright (C) 2018-2025 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include "openvino/core/type.hpp"

namespace ov {

/// \brief Tests if value is a pointer/shared_ptr that can be statically cast to any of the specified types
template <typename Type, typename... Types, typename Value>
bool is_type_any_of(Value value) {
return is_type<Type>(value) || (is_type_any_of<Types>(value) || ...);
}

} // namespace ov
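
For illustration, a minimal call-site sketch of how the utility collapses a chain of checks (the wrapper function name and the op types here are illustrative, not part of the PR):

#include "openvino/core/node.hpp"
#include "openvino/core/type_util.hpp"
#include "openvino/op/relu.hpp"
#include "openvino/op/sigmoid.hpp"

// Before: ov::is_type<ov::op::v0::Relu>(node) || ov::is_type<ov::op::v0::Sigmoid>(node)
// After: one variadic call; the fold expression short-circuits left to right.
bool is_simple_activation(const std::shared_ptr<ov::Node>& node) {
    return ov::is_type_any_of<ov::op::v0::Relu, ov::op::v0::Sigmoid>(node);
}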
@@ -172,7 +172,7 @@ void jit_kernel_emitter::emit_impl(const std::vector<size_t>& in, const std::vec
auto expected_out_type = snippets::RegType::undefined;
const auto& node = expression->get_node();
// Note: currently only a few operations are allowed to have mixed in/out register types => skip validation here
-if (!ov::is_type<snippets::op::LoopEnd>(node) && !ov::is_type<snippets::op::RegSpillBase>(node) &&
+if (!ov::is_type_any_of<snippets::op::LoopEnd, snippets::op::RegSpillBase>(node) &&
!std::dynamic_pointer_cast<jit_nop_emitter>(emitter)) {
std::tie(expected_in_type, expected_out_type) = get_expected_reg_types(emitter);
}
@@ -5,6 +5,7 @@
#include "jit_kernel_emitter.hpp"

#include "jit_snippets_emitters.hpp"
#include "openvino/core/type_util.hpp"
#include "snippets/utils/reg_utils.hpp"
#include "utils.hpp"

@@ -125,7 +126,7 @@ void jit_kernel_emitter::emit_impl(const std::vector<size_t>& in, const std::vec
const auto& node = expression->get_node();
// Note: A few operations are allowed to have mixed register types on their inputs (or outputs) => skip
// validation here
-if (!ov::is_type<snippets::op::LoopEnd>(node) && !ov::is_type<snippets::op::RegSpillBase>(node) &&
+if (!ov::is_type_any_of<snippets::op::LoopEnd, snippets::op::RegSpillBase>(node) &&
!std::dynamic_pointer_cast<jit_nop_emitter>(emitter)) {
std::tie(expected_in_type, expected_out_type) = get_expected_reg_types(emitter);
}
1 change: 1 addition & 0 deletions src/plugins/intel_cpu/src/node.h
@@ -29,6 +29,7 @@
#include "onednn/iml_type_mapper.h"
#include "openvino/cc/factory.h"
#include "openvino/core/node.hpp"
#include "openvino/core/type_util.hpp"
#include "perf_count.h"
#include "selective_build.h"
#include "utils/bit_util.hpp"
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/nodes/conv.cpp
@@ -226,7 +226,7 @@ class Convolution::FusedSubgraph {

bool Convolution::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
try {
-if (!ov::is_type<ov::op::v1::Convolution>(op) && !ov::is_type<ov::op::v1::GroupConvolution>(op)) {
+if (!ov::is_type_any_of<ov::op::v1::Convolution, ov::op::v1::GroupConvolution>(op)) {
errorMessage = "Only opset1 Convolution and GroupConvolution operations are supported";
return false;
}
5 changes: 1 addition & 4 deletions src/plugins/intel_cpu/src/nodes/dft.cpp
@@ -27,10 +27,7 @@ bool DFT::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::s
errorMessage = "Doesn't support op with dynamic shapes";
return false;
}
-const auto interpDFT = ov::is_type<const op::v7::DFT>(op);
-const auto interpIDFT = ov::is_type<const op::v7::IDFT>(op);
-
-if (!interpDFT && !interpIDFT) {
+if (!ov::is_type_any_of<const op::v7::DFT, const op::v7::IDFT>(op)) {
errorMessage = "Only opset7 DFT/IDFT operation is supported";
return false;
}
4 changes: 1 addition & 3 deletions src/plugins/intel_cpu/src/nodes/fullyconnected.cpp
@@ -78,9 +78,7 @@ ov::element::TypeVector FullyConnected::getSupportedCompressedActivationsTypes()
bool FullyConnected::isSupportedOperation(const std::shared_ptr<const ov::Node>& op,
std::string& errorMessage) noexcept {
try {
-if (!ov::is_type<const ov::op::internal::FullyConnected>(op) &&
-!ov::is_type<const ov::op::internal::FullyConnectedQuantizedLegacy>(op) &&
-!ov::is_type<const ov::op::internal::FullyConnectedCompressed>(op)) {
+if (!ov::is_type<const ov::op::internal::FullyConnected>(op)) {
return false;
}
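Note: the quantized/compressed checks are not lost here — this relies on FullyConnectedQuantizedLegacy and FullyConnectedCompressed deriving from ov::op::internal::FullyConnected, and since is_type matches through the type-info hierarchy, the single base-class check subsumes all three.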

12 changes: 7 additions & 5 deletions src/plugins/intel_cpu/src/nodes/pooling.cpp
@@ -145,19 +145,21 @@ dnnl::pooling_forward::primitive_desc createDescriptorHelper(const dnnl::engine&

bool Pooling::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
try {
-if (ov::is_type<const ov::op::v8::MaxPool>(op) || ov::is_type<const ov::op::v14::MaxPool>(op)) {
+if (ov::is_type_any_of<const ov::op::v8::MaxPool, const ov::op::v14::MaxPool>(op)) {
if (!op->get_output_target_inputs(1).empty()) {
errorMessage = "MaxPool from opset8 and opset14 is supported only with one output";
return false;
}
-} else if (!ov::is_type<const ov::op::v1::MaxPool>(op) && !ov::is_type<const ov::op::v8::MaxPool>(op) &&
-!ov::is_type<const ov::op::v14::MaxPool>(op) && !ov::is_type<const ov::op::v1::AvgPool>(op) &&
-!ov::is_type<const ov::op::v14::AvgPool>(op)) {
+} else if (!ov::is_type_any_of<const ov::op::v1::MaxPool,
+const ov::op::v8::MaxPool,
+const ov::op::v14::MaxPool,
+const ov::op::v1::AvgPool,
+const ov::op::v14::AvgPool>(op)) {
errorMessage = "Supported ops are MaxPool-1, MaxPool-8, MaxPool-14, AvgPool-1 and AvgPool-14";
return false;
}
#if defined(OV_CPU_WITH_ACL)
-if (ov::as_type_ptr<const ov::op::v8::MaxPool>(op) || ov::as_type_ptr<const ov::op::v14::MaxPool>(op)) {
+if (ov::is_type_any_of<const ov::op::v8::MaxPool, const ov::op::v14::MaxPool>(op)) {
if (ov::as_type_ptr<const ov::op::util::MaxPoolBase>(op)->get_kernel() != ov::Shape(2, 2)) {
errorMessage =
"Pooling indices returning source tensor coordinates is only supported for pool size 2x2";
3 changes: 1 addition & 2 deletions src/plugins/intel_cpu/src/nodes/strided_slice.cpp
@@ -20,8 +20,7 @@ namespace ov::intel_cpu::node {

bool StridedSlice::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
try {
-if (!ov::is_type<ov::op::v1::StridedSlice>(op) && !ov::is_type<ov::op::v8::Slice>(op) &&
-!ov::is_type<ov::op::v15::SliceScatter>(op)) {
+if (!ov::is_type_any_of<ov::op::v1::StridedSlice, ov::op::v8::Slice, ov::op::v15::SliceScatter>(op)) {
errorMessage = "Only StridedSlice from opset1, Slice from opset8 and SliceScatter from opset15 operations "
"are supported.";
return false;
@@ -623,9 +623,9 @@ std::shared_ptr<IStaticShapeInfer> make_shape_inference(std::shared_ptr<ov::Node
return shape_infer;
} else if (ov::is_type<op::util::UnaryElementwiseArithmetic>(op)) {
return std::make_shared<ShapeInferCopy>(std::move(op));
-} else if (ov::is_type<op::util::BinaryElementwiseArithmetic>(op) ||
-ov::is_type<op::util::BinaryElementwiseComparison>(op) ||
-ov::is_type<op::util::BinaryElementwiseLogical>(op)) {
+} else if (ov::is_type_any_of<op::util::BinaryElementwiseArithmetic,
+op::util::BinaryElementwiseComparison,
+op::util::BinaryElementwiseLogical>(op)) {
return std::make_shared<ShapeInferEltwise>(std::move(op));
} else {
return std::make_shared<ShapeInferFallback>(std::move(op));
@@ -8,6 +8,7 @@

#include "openvino/core/core.hpp"
#include "openvino/core/node.hpp"
#include "openvino/core/type_util.hpp"
#include "shape_inference/shape_inference_cpu.hpp"
#include "shape_inference/static_shape.hpp"
#include "tensor_data_accessor.hpp"
@@ -177,8 +177,7 @@ StatefulSDPAFusion::StatefulSDPAFusion() {
// the second one leads to Assign, and this is checked later
// the third child is allowed to be a ShapeOf op only, thus one of them must be ShapeOf
if (!std::any_of(children.begin(), children.end(), [](const ov::Input<ov::Node>& child) {
-return ov::is_type<ov::op::v3::ShapeOf>(child.get_node()) ||
-ov::is_type<ov::op::v0::ShapeOf>(child.get_node());
+return ov::is_type<ov::op::util::ShapeOfBase>(child.get_node());
})) {
return false;
}
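Note: ov::op::util::ShapeOfBase is the common base of v0::ShapeOf and v3::ShapeOf, so the single base-class check should be equivalent to the pair it replaces.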
@@ -4,6 +4,7 @@
#include "snippets_mark_skipped.hpp"

#include "itt.hpp"
#include "openvino/core/type_util.hpp"
#include "snippets/op/subgraph.hpp"
#include "snippets/pass/tokenization.hpp"
#include "snippets/utils/utils.hpp"
@@ -72,14 +73,16 @@ bool isFullyConnected(const std::shared_ptr<const ov::Node>& node) {
bool SupportsFusingWithConvolution_Simple(const std::shared_ptr<const Node>& node) {
// Note: some other operations support this fusing (SoftPlus, Sqrt).
// Skip them here, when they are supported by Snippets ARM. Ticket: 141170.
-return ov::is_type<ov::op::v0::Abs>(node) || ov::is_type<ov::op::v0::Clamp>(node) ||
-ov::is_type<ov::op::v0::Elu>(node) || ov::is_type<ov::op::v0::Relu>(node) ||
-ov::is_type<ov::op::v0::Sigmoid>(node) || ov::is_type<ov::op::v0::Tanh>(node);
+return ov::is_type_any_of<ov::op::v0::Abs,
+ov::op::v0::Clamp,
+ov::op::v0::Elu,
+ov::op::v0::Relu,
+ov::op::v0::Sigmoid,
+ov::op::v0::Tanh>(node);
}
// Convolution is a special case, since it supports peculiar fusings
bool isSuitableConvolutionParent(const std::shared_ptr<const Node>& node) {
-const bool is_suitable_node =
-ov::is_type<ov::op::v1::Convolution>(node) || ov::is_type<ov::op::v1::GroupConvolution>(node);
+const bool is_suitable_node = ov::is_type_any_of<ov::op::v1::Convolution, ov::op::v1::GroupConvolution>(node);
// has a single output, connected to a single child
const auto out = node->outputs();
const bool has_only_child = (out.size() == 1) && (out[0].get_target_inputs().size() == 1);
@@ -93,9 +96,8 @@ bool isSuitableBinaryConvolutionParent(const std::shared_ptr<const Node>& node)
return is_suitable_node && has_only_child;
}
bool isSuitableMiscParent(const std::shared_ptr<const Node>& node) {
-const bool is_suitable_node = ov::is_type<ov::op::v0::NormalizeL2>(node) ||
-ov::is_type<ov::op::v1::ConvolutionBackpropData>(node) ||
-ov::is_type<ov::op::v1::GroupConvolutionBackpropData>(node);
+const bool is_suitable_node =
+ov::is_type_any_of<ov::op::v0::NormalizeL2, ov::op::util::ConvolutionBackPropBase>(node);
// has a single output, connected to a single child
const auto out = node->outputs();
const bool has_only_child = (out.size() == 1) && (out[0].get_target_inputs().size() == 1);
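Note: beyond chaining, this hunk also generalizes the check — ConvolutionBackpropData and GroupConvolutionBackpropData share ov::op::util::ConvolutionBackPropBase as a base class, so one base-class test presumably covers both concrete ops.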
@@ -126,8 +128,7 @@ bool isSuitableChildForFusingBias(const std::shared_ptr<const Node>& node, int f
}

auto is_suitable_parent = [](const std::shared_ptr<const Node>& node) {
-return (ov::is_type<ov::op::v1::Convolution>(node) || ov::is_type<ov::op::v1::GroupConvolution>(node) ||
-ov::is_type<ov::op::v0::MatMul>(node));
+return (ov::is_type_any_of<ov::op::v1::Convolution, ov::op::v1::GroupConvolution, ov::op::v0::MatMul>(node));
};

for (const auto& in : node->inputs()) {
@@ -221,8 +222,7 @@ bool isSuitableConvert(const std::shared_ptr<const Node>& node) {
}

auto is_skipped_op(const std::shared_ptr<ov::Node>& op) -> bool {
-return ov::is_type<ov::op::v0::Constant>(op) || ov::is_type<ov::op::v0::Parameter>(op) ||
-ov::is_type<ov::op::v0::Result>(op);
+return ov::is_type_any_of<ov::op::v0::Constant, ov::op::v0::Parameter, ov::op::v0::Result>(op);
}

bool isSuitableMatMulWithConstantPath(const std::shared_ptr<Node>& node) {
@@ -5,6 +5,7 @@

#include "cpu/x64/cpu_isa_traits.hpp"
#include "itt.hpp"
#include "openvino/core/type_util.hpp"
#include "snippets/op/subgraph.hpp"
#include "snippets/pass/tokenization.hpp"
#include "snippets/utils/utils.hpp"
@@ -72,11 +73,16 @@ bool isFullyConnected(const std::shared_ptr<const ov::Node>& node) {
bool SupportsFusingWithConvolution_SumActivation(const std::shared_ptr<const Node>& node) {
// todo: Do all PReLUs are fused? Not sure about round and softRelu
// EltwiseRoundHalfToEven, EltwiseRoundHalfAwayFromZero, EltwiseSoftRelu
-return ov::is_type<ov::op::v0::Relu>(node) || ov::is_type<ov::op::v0::PRelu>(node) ||
-ov::is_type<ov::op::v0::Elu>(node) || ov::is_type<ov::op::v0::Sigmoid>(node) ||
-ov::is_type<ov::op::v5::HSigmoid>(node) || ov::is_type<ov::op::v0::Clamp>(node) ||
-ov::is_type<ov::op::v4::Swish>(node) || ov::is_type<ov::op::v4::HSwish>(node) ||
-ov::is_type<ov::op::v4::Mish>(node) || ov::is_type<ov::op::v5::Round>(node);
+return ov::is_type_any_of<ov::op::v0::Relu,
+ov::op::v0::PRelu,
+ov::op::v0::Elu,
+ov::op::v0::Sigmoid,
+ov::op::v5::HSigmoid,
+ov::op::v0::Clamp,
+ov::op::v4::Swish,
+ov::op::v4::HSwish,
+ov::op::v4::Mish,
+ov::op::v5::Round>(node);
}

bool canBePerformedAsScaleShift(const std::shared_ptr<const Node>& node, const int channelAxis) {
@@ -120,8 +126,7 @@ bool canBePerformedAsScaleShift(const std::shared_ptr<const Node>& node, const i

// Prelu and MulAdd are still ignored
// isConvertablePowerStatic() is ignored
-return (ov::is_type<ov::opset1::Add>(node) || ov::is_type<ov::opset1::Multiply>(node) ||
-ov::is_type<ov::opset1::Subtract>(node) || ov::is_type<ov::opset1::Divide>(node)) &&
+return ov::is_type_any_of<ov::opset1::Add, ov::opset1::Multiply, ov::opset1::Subtract, ov::opset1::Divide>(node) &&
isBroadcastableToDataInput();
}

@@ -131,15 +136,18 @@ inline bool canBeMatMulExecutedInInt8(const ov::element::Type& firstType, const

bool SupportsFusingWithConvolution_Simple(const std::shared_ptr<const Node>& node,
const int channelAxis = DEFAULT_AXIS) {
-return SupportsFusingWithConvolution_SumActivation(node) || ov::is_type<ov::op::v0::Tanh>(node) ||
-ov::is_type<ov::op::v0::Gelu>(node) || ov::is_type<ov::op::v7::Gelu>(node) ||
-ov::is_type<ov::op::v0::Abs>(node) || ov::is_type<ov::op::v0::Sqrt>(node) ||
-ov::is_type<ov::op::v0::FakeQuantize>(node) || canBePerformedAsScaleShift(node, channelAxis);
+return SupportsFusingWithConvolution_SumActivation(node) ||
+ov::is_type_any_of<ov::op::v0::Tanh,
+ov::op::v0::Gelu,
+ov::op::v7::Gelu,
+ov::op::v0::Abs,
+ov::op::v0::Sqrt,
+ov::op::v0::FakeQuantize>(node) ||
+canBePerformedAsScaleShift(node, channelAxis);
}
// Convolution is a special case, since it supports peculiar fusings
bool isSuitableConvolutionParent(const std::shared_ptr<const Node>& node) {
-const bool is_suitable_node =
-ov::is_type<ov::op::v1::Convolution>(node) || ov::is_type<ov::op::v1::GroupConvolution>(node);
+const bool is_suitable_node = ov::is_type_any_of<ov::op::v1::Convolution, ov::op::v1::GroupConvolution>(node);
// has a single output, connected to a single child
const auto out = node->outputs();
const bool has_only_child = (out.size() == 1) && (out[0].get_target_inputs().size() == 1);
@@ -168,14 +176,18 @@ int getChannelAxis(const ov::AxisSet& axes, bool keep_dims) {
return channelAxis;
}
bool isSuitableMiscParent(const std::shared_ptr<const Node>& node) {
-const bool is_suitable_node =
-ov::is_type<ov::op::v0::MVN>(node) || ov::is_type<ov::op::v6::MVN>(node) ||
-ov::is_type<ov::op::v0::NormalizeL2>(node) || ov::is_type<ov::op::v0::Interpolate>(node) ||
-ov::is_type<ov::op::v4::Interpolate>(node) || ov::is_type<ov::op::v0::LSTMCell>(node) ||
-ov::is_type<ov::op::v4::LSTMCell>(node) || ov::is_type<ov::opset1::ConvolutionBackpropData>(node) ||
-ov::is_type<ov::op::util::ArithmeticReductionKeepDims>(node) ||
-ov::is_type<ov::opset1::GroupConvolutionBackpropData>(node) || ov::is_type<ov::opset1::AvgPool>(node) ||
-ov::is_type<ov::op::v14::AvgPool>(node);
+const bool is_suitable_node = ov::is_type_any_of<ov::op::v0::MVN,
+ov::op::v6::MVN,
+ov::op::v0::NormalizeL2,
+ov::op::v0::Interpolate,
+ov::op::v4::Interpolate,
+ov::op::v0::LSTMCell,
+ov::op::v4::LSTMCell,
+ov::opset1::ConvolutionBackpropData,
+ov::op::util::ArithmeticReductionKeepDims,
+ov::opset1::GroupConvolutionBackpropData,
+ov::opset1::AvgPool,
+ov::op::v14::AvgPool>(node);
// has a single output, connected to a single child
const auto out = node->outputs();
const bool has_only_child = (out.size() == 1) && (out[0].get_target_inputs().size() == 1);
@@ -307,9 +319,11 @@ bool isSuitableChildForFusingMatMul(const std::shared_ptr<const Node>& node,

// MatMul specific checks from ::canFuse()
if (one_of(updatedChainType, NodeFusingType::FusedWithMatMul, NodeFusingType::FusedWithMatMulI8)) {
-const auto is_binary_eltwise = ov::is_type<ov::op::v1::Add>(node) || ov::is_type<ov::op::v1::Multiply>(node) ||
-ov::is_type<ov::op::v1::Subtract>(node) ||
-ov::is_type<ov::op::v1::Divide>(node) || ov::is_type<ov::op::v0::PRelu>(node);
+const auto is_binary_eltwise = ov::is_type_any_of<ov::op::v1::Add,
+ov::op::v1::Multiply,
+ov::op::v1::Subtract,
+ov::op::v1::Divide,
+ov::op::v0::PRelu>(node);
const auto rank = node->get_output_partial_shape(0).rank();
if (dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core) && rank.is_static() && is_binary_eltwise) {
const auto const1 = ov::is_type<ov::op::v0::Constant>(node->get_input_node_shared_ptr(0));
@@ -490,8 +504,7 @@ bool isSuitableConvert(const std::shared_ptr<const Node>& node) {
}

auto is_skipped_op(const std::shared_ptr<ov::Node>& op) -> bool {
-return ov::is_type<ov::op::v0::Constant>(op) || ov::is_type<ov::op::v0::Parameter>(op) ||
-ov::is_type<ov::op::v0::Result>(op);
+return ov::is_type_any_of<ov::op::v0::Constant, ov::op::v0::Parameter, ov::op::v0::Result>(op);
}
} // namespace

@@ -26,8 +26,7 @@ namespace ov::intel_cpu::tpp::op {
#define UNARY_AUX_METHODS(UNARY_OP) GENERAL_AUX_METHODS(UNARY_OP, UnaryEltwiseTPP, new_args.at(0))

bool EltwiseTPP::is_supported(const std::shared_ptr<ov::Node>& node) {
-return ov::is_type<ov::op::v1::Add>(node) || ov::is_type<ov::op::v1::Subtract>(node) ||
-ov::is_type<ov::op::v1::Multiply>(node) || ov::is_type<ov::op::v1::Divide>(node);
+return ov::is_type_any_of<ov::op::v1::Add, ov::op::v1::Subtract, ov::op::v1::Multiply, ov::op::v1::Divide>(node);
}

bool EltwiseTPP::visit_attributes(AttributeVisitor& visitor) {
@@ -6,6 +6,7 @@

#include "descriptor.hpp"
#include "modifiers.hpp"
#include "openvino/core/type_util.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/divide.hpp"
#include "openvino/op/exp.hpp"