From 42cb92ee1f25d580be25aaaa611823aba5877378 Mon Sep 17 00:00:00 2001
From: Tomasz Jankowski
Date: Tue, 14 Jan 2025 20:32:35 +0100
Subject: [PATCH] [RTTI] Replace std::dynamic_(pointer)?_casts with
 ov::as_type_(ptr)? - FEs (#28397)

### Details:
- Replaced `dynamic_cast` and `std::dynamic_pointer_cast` with `ov::as_type` and `ov::as_type_ptr`, respectively, in the src/frontends and src/tests directories, where applicable. A minimal sketch of the replacement pattern is shown below.
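For reference, a rough sketch of the pattern this patch applies in bulk, assuming current OpenVINO core headers; the `cast_example` helper and the `Constant` target type are illustrative assumptions, not code from this PR:

```cpp
#include <memory>

#include "openvino/core/node.hpp"
#include "openvino/core/type.hpp"
#include "openvino/op/constant.hpp"

// Hypothetical helper, not part of the patch: shows both cast styles side by side.
void cast_example(const std::shared_ptr<ov::Node>& node) {
    // Before: C++ RTTI, resolved by walking the inheritance graph at runtime.
    auto before = std::dynamic_pointer_cast<ov::op::v0::Constant>(node);

    // After: OpenVINO's own type-info check. Returns nullptr on a type
    // mismatch and, with the is_type change in this patch, also on null input.
    auto after = ov::as_type_ptr<ov::op::v0::Constant>(node);

    // Raw-pointer flavor that replaces plain dynamic_cast (e.g. in null_node.cpp).
    auto* raw = ov::as_type<ov::op::v0::Constant>(node.get());

    (void)before;
    (void)after;
    (void)raw;
}
```

Note the first hunk below: `is_type` gains a `value &&` guard, so `as_type` and `as_type_ptr` (implemented in terms of `is_type` in the same header) become safe to call on a null pointer.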
### Tickets:
- CVS-160241

---------

Signed-off-by: Tomasz Jankowski
---
 src/core/include/openvino/core/type.hpp | 2 +-
 src/frontends/ir/src/ir_deserializer.cpp | 14 ++++----
 src/frontends/jax/src/node_context.cpp | 2 +-
 .../onnx/frontend/src/core/null_node.cpp | 2 +-
 .../src/op/com.microsoft/matmulnbits.cpp | 4 +--
 .../onnx/frontend/src/utils/common.cpp | 8 ++---
 .../onnx/frontend/src/utils/onnx_internal.cpp | 6 ++--
 src/frontends/onnx/tests/conversion.cpp | 2 +-
 .../onnx/tests/convert_partially_tests.cpp | 2 +-
 .../onnx/tests/onnx_import_convpool.in.cpp | 2 +-
 .../onnx/tests/onnx_tensor_names.cpp | 6 ++--
 src/frontends/paddle/src/frontend.cpp | 2 +-
 .../internal/pass/transform_fakequantize.cpp | 8 ++---
 .../paddle/src/internal/pass/transform_if.cpp | 3 +-
 .../src/internal/pass/transform_while.cpp | 2 +-
 src/frontends/pytorch/src/frontend.cpp | 4 +--
 .../pytorch/src/helper_ops/internal_op.hpp | 3 ++
 .../src/helper_ops/packed_sequence.hpp | 4 +--
 src/frontends/pytorch/src/node_context.cpp | 6 ++--
 src/frontends/pytorch/src/op/arange.cpp | 3 +-
 src/frontends/pytorch/src/op/as_strided.cpp | 4 +--
 src/frontends/pytorch/src/op/as_tensor.cpp | 6 ++--
 src/frontends/pytorch/src/op/cat.cpp | 2 +-
 src/frontends/pytorch/src/op/convnd.cpp | 2 +-
 src/frontends/pytorch/src/op/linear.cpp | 4 +--
 src/frontends/pytorch/src/op/linspace.cpp | 2 +-
 .../pytorch/src/op/list_construct.cpp | 4 +--
 .../pytorch/src/op/quantized_convnd.cpp | 11 +++---
 .../pytorch/src/op/quantized_linear.cpp | 3 +-
 src/frontends/pytorch/src/op/rand.cpp | 15 ++++----
 .../pytorch/src/op/repeat_interleave.cpp | 2 +-
 src/frontends/pytorch/src/op/to.cpp | 6 ++--
 .../src/transforms/aten_cat_replacer.cpp | 4 +--
 .../aten_stack_list_construct_replacer.cpp | 2 +-
 .../pytorch/src/transforms/dict_resolver.cpp | 6 ++--
 .../transforms/irfftn_complex_replacer.cpp | 4 +--
 .../transforms/prim_list_unpack_replacer.cpp | 4 +--
 .../src/transforms/remove_packing_ops.cpp | 2 +-
 .../src/transforms/rfftn_complex_replacer.cpp | 4 +--
 .../softmax_reshape_elimination.hpp | 1 +
 .../transforms/string_equality_replacer.cpp | 10 +++---
 .../torchfx_gptq_pattern_replacer.cpp | 36 ++++++++-----------
 .../src/transforms/tuple_unpack_replacer.cpp | 2 +-
 .../src/transforms/u4_block_repack.cpp | 18 +++++-----
 src/frontends/pytorch/src/utils.cpp | 13 ++++---
 src/frontends/pytorch/src/utils_quantize.cpp | 9 +++--
 src/frontends/tensorflow/src/frontend.cpp | 4 +--
 .../uninitialized_variable_resolve.cpp | 2 +-
 .../tensorflow/src/translate_session.cpp | 10 +++---
 .../tests/convert_tricky_models.cpp | 2 +-
 .../tensorflow/tests/convert_unsupported.cpp | 2 +-
 .../tensor_array_v3_replacer.cpp | 2 +-
 .../tensor_list_ops_resolver.cpp | 20 +++++------
 .../tensorflow_lite/src/frontend.cpp | 3 +-
 .../frontend/shared/include/op_extension.hpp | 2 +-
 .../shared/src/cut_specific_model.cpp | 2 +-
 .../builtin_extensions.cpp | 4 +--
 .../subgraphs_dumper/include/utils/model.hpp | 12 +++---
 .../subgraphs_dumper/src/cache/op_cache.cpp | 22 ++++++------
 .../src/matchers/single_op/convolutions.cpp | 4 +--
 .../src/matchers/subgraph/fused_names.cpp | 6 ++--
 .../matchers/subgraph/read_value_assign.cpp | 6 ++--
 .../src/matchers/subgraph/repeat_pattern.cpp | 6 ++--
 .../subgraphs_dumper/src/utils/model.cpp | 16 ++++-----
 .../subgraphs_dumper/src/utils/node.cpp | 2 +-
 .../subgraphs_dumper/tests/cache/op_cache.cpp | 6 ++--
 .../src/utils/generate_static_shapes.cpp | 2 +-
 .../compiled_model/compiled_model_base.hpp | 4 +--
 .../src/base/utils/calculate_thresholds.cpp | 2 +-
 .../src/single_op/comparison.cpp | 2 +-
 .../src/single_op/reverse_sequence.cpp | 2 +-
 .../quantized_convolution_backprop_data.cpp | 2 +-
 .../subgraph/quantized_group_convolution.cpp | 2 +-
 ...ntized_group_convolution_backprop_data.cpp | 2 +-
 .../ov_lpt_models/src/elementwise.cpp | 2 +-
 .../ov_helpers/ov_lpt_models/src/multiply.cpp | 4 +--
 .../src/multiply_partial_function.cpp | 4 +--
 .../src/graph_comparator.cpp | 8 ++---
 .../common_test_utils/src/ov_test_utils.cpp | 2 +-
 .../src/summary/op_summary.cpp | 30 +++++++---------
 80 files changed, 220 insertions(+), 242 deletions(-)

diff --git a/src/core/include/openvino/core/type.hpp b/src/core/include/openvino/core/type.hpp index ab5c1ca0510b69..4877b9ce02b251 100644 --- a/src/core/include/openvino/core/type.hpp +++ b/src/core/include/openvino/core/type.hpp @@ -85,7 +85,7 @@ typename std::enable_if< bool>::value, bool>::type is_type(Value value) { - return value->get_type_info().is_castable(Type::get_type_info_static()); + return value && value->get_type_info().is_castable(Type::get_type_info_static()); } /// Casts a Value* to a Type* if it is of type Type, nullptr otherwise
diff --git a/src/frontends/ir/src/ir_deserializer.cpp b/src/frontends/ir/src/ir_deserializer.cpp index 3b549ec91714e5..33e77d147557b0 100644 --- a/src/frontends/ir/src/ir_deserializer.cpp +++ b/src/frontends/ir/src/ir_deserializer.cpp @@ -533,18 +533,18 @@ std::shared_ptr ov::XmlDeserializer::parse_function(const pugi::xml_n auto node = create_node(inputs, p.xml, weights, p.params); id_to_node[layer_id] = node; - if (const auto& parameter_node = std::dynamic_pointer_cast(node)) { + if (const auto& parameter_node = ov::as_type_ptr(node)) { io_map.inputs.insert({layer_id, func_nodes.parameters.size()}); func_nodes.parameters.emplace_back(parameter_node); } - if (const auto& result_node = std::dynamic_pointer_cast(node)) { + if (const auto& result_node = ov::as_type_ptr(node)) { io_map.outputs.insert({layer_id, func_nodes.results.size()}); func_nodes.results.emplace_back(result_node); } - if (const auto& sink = std::dynamic_pointer_cast(node)) { - auto subgraph_op = std::dynamic_pointer_cast(node); + if (const auto& sink = ov::as_type_ptr(node)) { + auto subgraph_op = ov::as_type_ptr(node); if (subgraph_op) { for (const auto& body_model : subgraph_op->get_functions()) { if (body_model->get_sinks().size()) { @@ -557,7 +557,7 @@ std::shared_ptr ov::XmlDeserializer::parse_function(const pugi::xml_n } } - if (const auto& read_value = std::dynamic_pointer_cast(node)) { + if (const auto& read_value = ov::as_type_ptr(node)) { variable_id_to_read_value[read_value->get_variable_id()] = read_value; } @@ -569,7 +569,7 @@ std::shared_ptr ov::XmlDeserializer::parse_function(const pugi::xml_n func_nodes.parameters, pugixml::get_str_attr(root, "name", "")); for (const auto& sink : func_nodes.sinks) { - if (const auto& assign = std::dynamic_pointer_cast(sink)) { + if (const auto& assign = ov::as_type_ptr(sink)) { assign->add_control_dependency(variable_id_to_read_value.at(assign->get_variable_id())); } } @@ -902,7 +902,7 @@ std::shared_ptr ov::XmlDeserializer::create_node(const
std::vector(ovNode)) { + if (auto constant = ov::as_type_ptr(ovNode)) { constant->alloc_buffer_on_visit_attributes(false); } ovNode->set_arguments(inputs); diff --git a/src/frontends/jax/src/node_context.cpp b/src/frontends/jax/src/node_context.cpp index 93fbac80807958..f6a965b258fff4 100644 --- a/src/frontends/jax/src/node_context.cpp +++ b/src/frontends/jax/src/node_context.cpp @@ -197,7 +197,7 @@ Any NodeContext::get_values_from_const_input(int index) const { index, " does not exist."); auto input_val = get_input(index); - if (auto input = std::dynamic_pointer_cast(input_val.get_node_shared_ptr())) { + if (auto input = ov::as_type_ptr(input_val.get_node_shared_ptr())) { const auto& attrs = input->get_attrs(); if (attrs.find("none_value") != attrs.end()) { return {}; diff --git a/src/frontends/onnx/frontend/src/core/null_node.cpp b/src/frontends/onnx/frontend/src/core/null_node.cpp index e595c4dd8f5c96..2f847f7d6d309f 100644 --- a/src/frontends/onnx/frontend/src/core/null_node.cpp +++ b/src/frontends/onnx/frontend/src/core/null_node.cpp @@ -19,7 +19,7 @@ std::shared_ptr NullNode::clone_with_new_inputs(const ov::OutputVector } // namespace ov bool ov::op::util::is_null(const ov::Node* node) { - return dynamic_cast(node) != nullptr; + return ov::as_type(node) != nullptr; } bool ov::op::util::is_null(const std::shared_ptr& node) { diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/matmulnbits.cpp b/src/frontends/onnx/frontend/src/op/com.microsoft/matmulnbits.cpp index fd3bc1b655c039..3c71f1c8985187 100644 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/matmulnbits.cpp +++ b/src/frontends/onnx/frontend/src/op/com.microsoft/matmulnbits.cpp @@ -53,7 +53,7 @@ ov::OutputVector matmulnbits(const ov::frontend::onnx::Node& node) { CHECK_VALID_NODE(node, blob_size > 0, "Wrong blob size: ", blob_size); // in documentation: ...Input B is a 2D constant Matrix. CHECK_VALID_NODE(node, - dynamic_cast(b_quantized.get_node()) != nullptr, + ov::as_type(b_quantized.get_node()) != nullptr, "MatMulNBits limitation: accepting only a constant as a B input"); CHECK_VALID_NODE(node, b_quantized.get_partial_shape().rank() == 3, @@ -112,7 +112,7 @@ ov::OutputVector matmulnbits(const ov::frontend::onnx::Node& node) { } { - const auto b_const = std::dynamic_pointer_cast(b_quantized.get_node_shared_ptr()); + const auto b_const = ov::as_type_ptr(b_quantized.get_node_shared_ptr()); ov::Output casted_b; ov::Shape casted_b_shape; diff --git a/src/frontends/onnx/frontend/src/utils/common.cpp b/src/frontends/onnx/frontend/src/utils/common.cpp index e15b0c0bcda4fd..041ada73f9c387 100644 --- a/src/frontends/onnx/frontend/src/utils/common.cpp +++ b/src/frontends/onnx/frontend/src/utils/common.cpp @@ -221,7 +221,7 @@ bool collect_translation_exceptions(const std::shared_ptr& partially_ }; for (const auto& node : partially_converted->get_ordered_ops()) { - if (const auto& fw_node = std::dynamic_pointer_cast(node)) { + if (const auto& fw_node = ov::as_type_ptr(node)) { const auto& attrs = fw_node->get_attrs(); auto node_name = attrs.get_opset_name() + "." 
+ attrs.get_type_name(); if (unsupported_operations->count(node_name) > 0) { @@ -230,7 +230,7 @@ bool collect_translation_exceptions(const std::shared_ptr& partially_ print_unsupported(fw_node); unsupported_operations->insert(node_name); - } else if (const auto& fw_node = std::dynamic_pointer_cast(node)) { + } else if (const auto& fw_node = ov::as_type_ptr(node)) { const auto& attrs = fw_node->get_attrs(); if (fw_node->additional_error_message().empty()) { @@ -248,7 +248,7 @@ bool collect_translation_exceptions(const std::shared_ptr& partially_ failures->insert(node_fail); } - } else if (const auto& if_node = std::dynamic_pointer_cast(node)) { + } else if (const auto& if_node = ov::as_type_ptr(node)) { collect_translation_exceptions(if_node->get_then_body(), telemetry, output_stream, @@ -259,7 +259,7 @@ bool collect_translation_exceptions(const std::shared_ptr& partially_ output_stream, unsupported_operations, failures); - } else if (const auto& loop_node = std::dynamic_pointer_cast(node)) { + } else if (const auto& loop_node = ov::as_type_ptr(node)) { collect_translation_exceptions(loop_node->get_function(), telemetry, output_stream, diff --git a/src/frontends/onnx/frontend/src/utils/onnx_internal.cpp b/src/frontends/onnx/frontend/src/utils/onnx_internal.cpp index ebf34eb5863905..18edc12d61952a 100644 --- a/src/frontends/onnx/frontend/src/utils/onnx_internal.cpp +++ b/src/frontends/onnx/frontend/src/utils/onnx_internal.cpp @@ -31,7 +31,7 @@ void remove_dangling_parameters(std::shared_ptr& model) { std::all_of(parameter_users.begin(), parameter_users.end(), [](const std::shared_ptr& node) -> bool { - return std::dynamic_pointer_cast(node) != nullptr; + return ov::as_type_ptr(node) != nullptr; }); if (is_dangling_parameter) { model->remove_parameter(parameter); @@ -69,8 +69,8 @@ void convert_decoded_model(std::shared_ptr model) { "' attribute in decoded model. 
Model probably wasn't created by FrontEnd::decode function."); auto onnx_graph = it->second.as>(); for (const auto& node : model->get_ordered_ops()) { - if (auto raw_node = std::dynamic_pointer_cast(node)) { - if (auto subgraph_node = std::dynamic_pointer_cast(node)) { + if (auto raw_node = ov::as_type_ptr(node)) { + if (auto subgraph_node = ov::as_type_ptr(node)) { subgraph_node->infer_inputs_from_parent(); for (auto& model : subgraph_node->get_subgraph_models()) { convert_decoded_model(model); diff --git a/src/frontends/onnx/tests/conversion.cpp b/src/frontends/onnx/tests/conversion.cpp index c837fa394ce431..237712e60b2725 100644 --- a/src/frontends/onnx/tests/conversion.cpp +++ b/src/frontends/onnx/tests/conversion.cpp @@ -69,7 +69,7 @@ TEST(ONNXConversionExtensionTest, custom_op_with_custom_domain) { OV_ASSERT_NO_THROW(model = onnx::tests::convert_model("missing_op_domain.onnx", ext)); for (const auto& op : model->get_ops()) { - if (const auto& add = std::dynamic_pointer_cast(op)) { + if (const auto& add = ov::as_type_ptr(op)) { EXPECT_TRUE(add->get_rt_info().count("added_by_extension") == 1); return; } diff --git a/src/frontends/onnx/tests/convert_partially_tests.cpp b/src/frontends/onnx/tests/convert_partially_tests.cpp index 290bb4d7298a9c..0409d73cb860ee 100644 --- a/src/frontends/onnx/tests/convert_partially_tests.cpp +++ b/src/frontends/onnx/tests/convert_partially_tests.cpp @@ -19,7 +19,7 @@ namespace { std::shared_ptr get_framework_node_with_out_name(const std::shared_ptr& model, const std::string& out_name) { for (const auto& op : model->get_ops()) { - if (auto framework_node = std::dynamic_pointer_cast(op)) { + if (auto framework_node = ov::as_type_ptr(op)) { for (const auto& out : op->outputs()) { if (out.get_any_name() == out_name) { return framework_node; diff --git a/src/frontends/onnx/tests/onnx_import_convpool.in.cpp b/src/frontends/onnx/tests/onnx_import_convpool.in.cpp index 3d34a40554752b..8080ff178bf79f 100644 --- a/src/frontends/onnx/tests/onnx_import_convpool.in.cpp +++ b/src/frontends/onnx/tests/onnx_import_convpool.in.cpp @@ -334,7 +334,7 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_max_pool_empty_auto_pad) { const auto model = convert_model("max_pool_empty_auto_pad.onnx"); for (const auto& op : model->get_ops()) { - if (const auto max_pool = std::dynamic_pointer_cast(op)) { + if (const auto max_pool = ov::as_type_ptr(op)) { EXPECT_EQ(max_pool->get_auto_pad(), op::PadType::EXPLICIT); return; } diff --git a/src/frontends/onnx/tests/onnx_tensor_names.cpp b/src/frontends/onnx/tests/onnx_tensor_names.cpp index d66d6766f87dd0..933bb12cde1d76 100644 --- a/src/frontends/onnx/tests/onnx_tensor_names.cpp +++ b/src/frontends/onnx/tests/onnx_tensor_names.cpp @@ -29,7 +29,7 @@ bool matching_node_found_in_graph(const std::vector& ops, const std::unordered_set& output_names, int out_tensor_number = 0) { return std::any_of(std::begin(ops), std::end(ops), [&](const DerivedFromNode op) { - if (const std::shared_ptr casted = std::dynamic_pointer_cast(op)) { + if (const std::shared_ptr casted = ov::as_type_ptr(op)) { const auto& op_friendly_name = casted->get_friendly_name(); const auto& op_output_names = casted->get_output_tensor(out_tensor_number).get_names(); if (op_friendly_name == friendly_name && op_output_names == output_names) { @@ -44,11 +44,11 @@ template std::shared_ptr find_by_friendly_name(const std::vector& ops, const std::string& friendly_name) { const auto it = std::find_if(std::begin(ops), std::end(ops), [&friendly_name](const DerivedFromNode& op) { - return 
op->get_friendly_name() == friendly_name && std::dynamic_pointer_cast(op) != nullptr; + return op->get_friendly_name() == friendly_name && ov::as_type_ptr(op) != nullptr; }); if (it != std::end(ops)) { - return std::dynamic_pointer_cast(*it); + return ov::as_type_ptr(*it); } else { return nullptr; } diff --git a/src/frontends/paddle/src/frontend.cpp b/src/frontends/paddle/src/frontend.cpp index 4081f59e132b0d..22d5547489e723 100644 --- a/src/frontends/paddle/src/frontend.cpp +++ b/src/frontends/paddle/src/frontend.cpp @@ -492,7 +492,7 @@ std::shared_ptr FrontEnd::convert(const InputModel::Ptr& model) const void FrontEnd::convert(const std::shared_ptr& partiallyConverted) const { for (const auto& node : partiallyConverted->get_ordered_ops()) { if (ov::is_type(node)) { - paddle::normalize_framework_node(std::dynamic_pointer_cast(node), m_op_translators); + paddle::normalize_framework_node(ov::as_type_ptr(node), m_op_translators); } } for (const auto& result : partiallyConverted->get_results()) { diff --git a/src/frontends/paddle/src/internal/pass/transform_fakequantize.cpp b/src/frontends/paddle/src/internal/pass/transform_fakequantize.cpp index 93c8d632292f3c..4ab7557c4be2cb 100644 --- a/src/frontends/paddle/src/internal/pass/transform_fakequantize.cpp +++ b/src/frontends/paddle/src/internal/pass/transform_fakequantize.cpp @@ -71,20 +71,20 @@ ov::frontend::paddle::pass::TransformFakeQuantize::TransformFakeQuantize() { // check round mode // Fallback to the PDPD FE if the round_mode is HALF_AWAY_FROM_ZERO. - const auto& round_node_cast = std::dynamic_pointer_cast(opsMap.at(round_label).get_node_shared_ptr()); + const auto& round_node_cast = ov::as_type_ptr(opsMap.at(round_label).get_node_shared_ptr()); if (!round_node_cast || round_node_cast->get_mode() != Round::RoundMode::HALF_TO_EVEN) { return false; } // check quantize_linear zero_point - auto zp_node_cast = std::dynamic_pointer_cast(opsMap.at(dq_zp_label).get_node_shared_ptr()); + auto zp_node_cast = ov::as_type_ptr(opsMap.at(dq_zp_label).get_node_shared_ptr()); float zp; if (!zp_node_cast || !ov::op::util::get_single_value(zp_node_cast, zp)) { return false; } // prepare levels - const auto& clamp_node_cast = std::dynamic_pointer_cast(opsMap.at(q_clamp_label).get_node_shared_ptr()); + const auto& clamp_node_cast = ov::as_type_ptr(opsMap.at(q_clamp_label).get_node_shared_ptr()); if (!clamp_node_cast) { return false; } @@ -93,7 +93,7 @@ ov::frontend::paddle::pass::TransformFakeQuantize::TransformFakeQuantize() { const auto levels = high_range - low_range + 1; // get the scale - const auto& scale_node_cast = std::dynamic_pointer_cast( + const auto& scale_node_cast = ov::as_type_ptr( opsMap.at(q_real_scale_label).get_node_shared_ptr()->get_input_node_shared_ptr(0)); float scale; if (!scale_node_cast || !ov::op::util::get_single_value(scale_node_cast, scale)) { diff --git a/src/frontends/paddle/src/internal/pass/transform_if.cpp b/src/frontends/paddle/src/internal/pass/transform_if.cpp index 3d96154e5213e1..cfda9f6cbd6c9f 100644 --- a/src/frontends/paddle/src/internal/pass/transform_if.cpp +++ b/src/frontends/paddle/src/internal/pass/transform_if.cpp @@ -23,8 +23,7 @@ ov::frontend::paddle::pass::TransformIf::TransformIf(std::vector(); matcher_pass_callback callback = [funcs](pattern::Matcher& m) -> bool { - const auto conditional_block = - std::dynamic_pointer_cast(m.get_match_root()); + const auto conditional_block = ov::as_type_ptr(m.get_match_root()); const auto mask_idx = conditional_block->get_input_size() - 1; const auto cond = 
conditional_block->get_input_node_shared_ptr(mask_idx); diff --git a/src/frontends/paddle/src/internal/pass/transform_while.cpp b/src/frontends/paddle/src/internal/pass/transform_while.cpp index cacc601ddc8214..702d9fd5c83cde 100644 --- a/src/frontends/paddle/src/internal/pass/transform_while.cpp +++ b/src/frontends/paddle/src/internal/pass/transform_while.cpp @@ -29,7 +29,7 @@ ov::frontend::paddle::pass::TransformWhile::TransformWhile(std::vector(); matcher_pass_callback callback = [functions](pattern::Matcher& m) -> bool { - const auto& while_node = std::dynamic_pointer_cast(m.get_match_root()); + const auto& while_node = ov::as_type_ptr(m.get_match_root()); if (!while_node) return false; const auto& inputs = while_node->input_values(); diff --git a/src/frontends/pytorch/src/frontend.cpp b/src/frontends/pytorch/src/frontend.cpp index 69048d4798e788..04ba9a9c92c281 100644 --- a/src/frontends/pytorch/src/frontend.cpp +++ b/src/frontends/pytorch/src/frontend.cpp @@ -183,7 +183,7 @@ std::shared_ptr FrontEnd::convert(const ov::frontend::InputModel::Ptr& mo auto place = inputs[i]; if (place->get_names().size() != 0 && input_names.find(place->get_names().at(0)) != input_names.end()) { auto input = converted_model->input(place->get_names().at(0)); - auto param = std::dynamic_pointer_cast(input.get_node_shared_ptr()); + auto param = ov::as_type_ptr(input.get_node_shared_ptr()); FRONT_END_GENERAL_CHECK(param, "Input is not a Parameter."); update_parameter_info(param, place, converted_model); } else { @@ -205,7 +205,7 @@ std::shared_ptr FrontEnd::convert(const ov::frontend::InputModel::Ptr& mo update_parameter_info(parameters[idx], fplace, converted_model); } else { auto input = converted_model->input(fplace->get_names().at(0)); - auto param = std::dynamic_pointer_cast(input.get_node_shared_ptr()); + auto param = ov::as_type_ptr(input.get_node_shared_ptr()); FRONT_END_GENERAL_CHECK(param, "Input is not a Parameter."); update_parameter_info(param, fplace, converted_model); } diff --git a/src/frontends/pytorch/src/helper_ops/internal_op.hpp b/src/frontends/pytorch/src/helper_ops/internal_op.hpp index 54657a765f4338..f840ff856d4fd0 100644 --- a/src/frontends/pytorch/src/helper_ops/internal_op.hpp +++ b/src/frontends/pytorch/src/helper_ops/internal_op.hpp @@ -41,6 +41,9 @@ class InternalOpDecoder : public DummyDecoder { }; class InternalOperation : public PtFrameworkNode { +public: + OPENVINO_OP("InternalOperation", "util", PtFrameworkNode); + protected: InternalOperation(const std::string& op_type, const OutputVector& inputs, diff --git a/src/frontends/pytorch/src/helper_ops/packed_sequence.hpp b/src/frontends/pytorch/src/helper_ops/packed_sequence.hpp index d947ed735adcb2..9766346fbff563 100644 --- a/src/frontends/pytorch/src/helper_ops/packed_sequence.hpp +++ b/src/frontends/pytorch/src/helper_ops/packed_sequence.hpp @@ -13,7 +13,7 @@ namespace pytorch { class PackPadded : public InternalOperation { public: - OPENVINO_OP("PackPadded", "util", ov::op::util::FrameworkNode); + OPENVINO_OP("PackPadded", "util", InternalOperation); PackPadded(const Output& input, const Output& lengths) : InternalOperation("prim::PackPadded", {input, lengths}, 2, "This is PackedSequence pack operation.") { validate_and_infer_types(); @@ -27,7 +27,7 @@ class PackPadded : public InternalOperation { class PadPacked : public InternalOperation { public: - OPENVINO_OP("PadPacked", "util", ov::op::util::FrameworkNode); + OPENVINO_OP("PadPacked", "util", InternalOperation); PadPacked(const Output& input, const Output& lengths) : 
InternalOperation("prim::PadPacked", {input, lengths}, 2, "This is PackedSequence unpack operation.") { validate_and_infer_types(); diff --git a/src/frontends/pytorch/src/node_context.cpp b/src/frontends/pytorch/src/node_context.cpp index bd3d7bc89c57f4..8edd353adb4599 100644 --- a/src/frontends/pytorch/src/node_context.cpp +++ b/src/frontends/pytorch/src/node_context.cpp @@ -111,7 +111,7 @@ Output NodeContext::get_input_from_visible_context(size_t index) const { FRONT_END_GENERAL_CHECK(index < get_input_size(), "Index ", index, " is lower then number of inputs."); auto input_tensor = get_input(static_cast(index)); auto input_node = input_tensor.get_node_shared_ptr(); - if (std::dynamic_pointer_cast(input_node)) { + if (ov::as_type_ptr(input_node)) { // We need to look into external context for inputs that would be feed into this parameter size_t tensor_idx = m_translate_session->decode_tensor_name(input_node->output(0)); if (m_ext_tensor_map.count(tensor_idx)) { @@ -298,7 +298,7 @@ template <> std::string NodeContext::const_input(size_t index) const { FRONT_END_GENERAL_CHECK(!input_is_none(index), "Input with index: ", index, " is none."); auto input_node = get_input_from_visible_context(index).get_node_shared_ptr(); - auto input = std::dynamic_pointer_cast(input_node); + auto input = ov::as_type_ptr(input_node); FRONT_END_GENERAL_CHECK(input, "Input node with index ", index, @@ -327,7 +327,7 @@ Any NodeContext::get_values_from_const_input(int index) const { if (input_is_none(index)) return {}; auto input_val = get_input_from_visible_context(index); - if (auto input = std::dynamic_pointer_cast(input_val.get_node_shared_ptr())) { + if (auto input = ov::as_type_ptr(input_val.get_node_shared_ptr())) { const auto& attrs = input->get_attrs(); if (attrs.find("none_value") != attrs.end()) { return {}; diff --git a/src/frontends/pytorch/src/op/arange.cpp b/src/frontends/pytorch/src/op/arange.cpp index 6725db7c90b267..e20d8171053975 100644 --- a/src/frontends/pytorch/src/op/arange.cpp +++ b/src/frontends/pytorch/src/op/arange.cpp @@ -64,8 +64,7 @@ OutputVector translate_arange(const NodeContext& context) { PYTORCH_OP_CONVERSION_CHECK(false, "Not expected number of inputs for ", context.get_op_type()); } if (dtype_port >= 0 && !context.input_is_none(dtype_port)) { - if (std::dynamic_pointer_cast( - context.get_input_from_visible_context(dtype_port).get_node_shared_ptr())) { + if (ov::as_type_ptr(context.get_input_from_visible_context(dtype_port).get_node_shared_ptr())) { dtype = convert_dtype(context.const_input(dtype_port)); dtype_applied = true; } else if (const auto& fw_node = diff --git a/src/frontends/pytorch/src/op/as_strided.cpp b/src/frontends/pytorch/src/op/as_strided.cpp index 00a64b09e7bedf..5079766b4a1af0 100644 --- a/src/frontends/pytorch/src/op/as_strided.cpp +++ b/src/frontends/pytorch/src/op/as_strided.cpp @@ -92,7 +92,7 @@ OutputVector translate_as_strided(const NodeContext& context) { std::deque> sizes; std::deque> strides; - if (std::dynamic_pointer_cast(context.get_input_from_visible_context(1).get_node_shared_ptr())) { + if (ov::as_type_ptr(context.get_input_from_visible_context(1).get_node_shared_ptr())) { auto input_vector = context.const_input>(1); std::for_each(input_vector.rbegin(), input_vector.rend(), [&](int64_t input_val) { auto const_input = context.mark_node(v0::Constant::create(element::i32, Shape{}, {input_val})); @@ -101,7 +101,7 @@ OutputVector translate_as_strided(const NodeContext& context) { } else { sizes = get_list_as_outputs(context.get_input(1)); } - if 
(std::dynamic_pointer_cast(context.get_input_from_visible_context(2).get_node_shared_ptr())) { + if (ov::as_type_ptr(context.get_input_from_visible_context(2).get_node_shared_ptr())) { auto input_vector = context.const_input>(2); std::for_each(input_vector.rbegin(), input_vector.rend(), [&](int64_t input_val) { auto const_input = context.mark_node(v0::Constant::create(element::i32, Shape{}, {input_val})); diff --git a/src/frontends/pytorch/src/op/as_tensor.cpp b/src/frontends/pytorch/src/op/as_tensor.cpp index fe447c544edb6b..9c3e4c026606a4 100644 --- a/src/frontends/pytorch/src/op/as_tensor.cpp +++ b/src/frontends/pytorch/src/op/as_tensor.cpp @@ -28,14 +28,14 @@ OutputVector translate_as_tensor(const NodeContext& context) { auto list_elems = get_list_as_outputs(data); if (!context.input_is_none(1)) { auto dtype_ext_node = context.get_input_from_visible_context(1).get_node_shared_ptr(); - auto dtype_fw_node = std::dynamic_pointer_cast(dtype_ext_node); + auto dtype_fw_node = ov::as_type_ptr(dtype_ext_node); if (dtype_fw_node && dtype_fw_node->get_op_type() == "prim::dtype") { auto type_input = dtype_fw_node->input_value(0); std::for_each(list_elems.begin(), list_elems.end(), [&](Output& n) { n = context.mark_node(std::make_shared(n, type_input)); }); } - if (auto dtype_const = std::dynamic_pointer_cast(dtype_ext_node)) { + if (auto dtype_const = ov::as_type_ptr(dtype_ext_node)) { auto pt_type = dtype_const->cast_vector()[0]; dtype = convert_dtype(pt_type); std::for_each(list_elems.begin(), list_elems.end(), [&](Output& n) { @@ -59,4 +59,4 @@ OutputVector translate_as_tensor(const NodeContext& context) { } // namespace op } // namespace pytorch } // namespace frontend -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/frontends/pytorch/src/op/cat.cpp b/src/frontends/pytorch/src/op/cat.cpp index 9a6048d39044fc..5f620cc7b703c8 100644 --- a/src/frontends/pytorch/src/op/cat.cpp +++ b/src/frontends/pytorch/src/op/cat.cpp @@ -43,7 +43,7 @@ OutputVector translate_cat_common(const NodeContext& context, "::cat is located inside body while inputs are located outside of the body. " "This case is not supported."); if (list_elems.size() == 1 && - !std::dynamic_pointer_cast(context.get_input(0).get_node_shared_ptr()) && !is_fx) { + !ov::as_type_ptr(context.get_input(0).get_node_shared_ptr()) && !is_fx) { // Case when list was merged into tensor. 
// This case doesn't work with torchfx auto tensor = list_elems[0]; auto shape = context.mark_node(std::make_shared(tensor, element::i32)); diff --git a/src/frontends/pytorch/src/op/convnd.cpp b/src/frontends/pytorch/src/op/convnd.cpp index 78a78f23bc532d..ca3dcc77114ccb 100644 --- a/src/frontends/pytorch/src/op/convnd.cpp +++ b/src/frontends/pytorch/src/op/convnd.cpp @@ -53,7 +53,7 @@ OutputVector translate_convnd(const NodeContext& context) { if (!context.input_is_none(2)) { auto bias = context.get_input(2); auto bias_from_visible_context = context.get_input_from_visible_context(2); - if (std::dynamic_pointer_cast(bias_from_visible_context.get_node_shared_ptr())) { + if (ov::as_type_ptr(bias_from_visible_context.get_node_shared_ptr())) { bias = bias_from_visible_context; } auto bias_rank = bias.get_partial_shape().rank(); diff --git a/src/frontends/pytorch/src/op/linear.cpp b/src/frontends/pytorch/src/op/linear.cpp index 5472507d75cc2f..c6e345f70a9da7 100644 --- a/src/frontends/pytorch/src/op/linear.cpp +++ b/src/frontends/pytorch/src/op/linear.cpp @@ -60,7 +60,7 @@ uint32_t rearrange_awq_bits(uint32_t num) { } Output rearrange_constant(const Output& c, uint32_t groups) { - auto constant = std::dynamic_pointer_cast(c.get_node_shared_ptr()); + auto constant = ov::as_type_ptr(c.get_node_shared_ptr()); FRONT_END_OP_CONVERSION_CHECK(constant, "weight must be Constant."); auto src = constant->get_data_ptr(); auto initial_shape = constant->get_shape(); @@ -118,4 +118,4 @@ OutputVector translate_linear_awq(const NodeContext& context) { } // namespace op } // namespace pytorch } // namespace frontend -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/frontends/pytorch/src/op/linspace.cpp b/src/frontends/pytorch/src/op/linspace.cpp index 39fd2d5e7a8813..36319099a0d37a 100644 --- a/src/frontends/pytorch/src/op/linspace.cpp +++ b/src/frontends/pytorch/src/op/linspace.cpp @@ -37,7 +37,7 @@ OutputVector translate_linspace(const NodeContext& context) { auto dtype = element::f32; if (!context.input_is_none(3) && context.get_input_size() == 7) { // Case where dtype is provided directly in dtype input. 
- if (std::dynamic_pointer_cast(context.get_input_from_visible_context(3).get_node_shared_ptr())) { + if (ov::as_type_ptr(context.get_input_from_visible_context(3).get_node_shared_ptr())) { dtype = convert_dtype(context.const_input(3)); apply_dtype = true; } else if (const auto& fw_node = cast_fw_node(context.get_input(3).get_node_shared_ptr(), "prim::dtype")) { diff --git a/src/frontends/pytorch/src/op/list_construct.cpp b/src/frontends/pytorch/src/op/list_construct.cpp index 8916eeddb62121..15e87e1ca80e16 100644 --- a/src/frontends/pytorch/src/op/list_construct.cpp +++ b/src/frontends/pytorch/src/op/list_construct.cpp @@ -21,7 +21,7 @@ OutputVector translate_list_construct(const NodeContext& context) { ov::OutputVector consts; for (size_t i = 0; i < context.get_input_size(); i++) { auto input = context.get_input_from_visible_context(i); - auto c_node = std::dynamic_pointer_cast(input.get_node_shared_ptr()); + auto c_node = ov::as_type_ptr(input.get_node_shared_ptr()); PYTORCH_OP_CONVERSION_CHECK(c_node, "Translation for prim::ListConstruct support only constant inputs"); if (c_node->get_shape().size() == 0) { c_node = std::make_shared(c_node->get_element_type(), Shape{1}, c_node->get_data_ptr()); @@ -45,4 +45,4 @@ OutputVector translate_list_construct(const NodeContext& context) { } // namespace op } // namespace pytorch } // namespace frontend -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/frontends/pytorch/src/op/quantized_convnd.cpp b/src/frontends/pytorch/src/op/quantized_convnd.cpp index 523c2fe65b07ee..bbdbf0da4d7bba 100644 --- a/src/frontends/pytorch/src/op/quantized_convnd.cpp +++ b/src/frontends/pytorch/src/op/quantized_convnd.cpp @@ -21,8 +21,7 @@ using namespace ov::op; namespace { Output translate_quantized_convnd_base(const NodeContext& context) { auto input = context.get_input(0); - auto packed_params_node = - std::dynamic_pointer_cast(context.get_input(1).get_node_shared_ptr()); + auto packed_params_node = ov::as_type_ptr(context.get_input(1).get_node_shared_ptr()); PYTORCH_OP_CONVERSION_CHECK(packed_params_node, "Packed params input node type is required to be FrameworkNode."); const auto& attrs = packed_params_node->get_attrs(); PYTORCH_OP_CONVERSION_CHECK((attrs.find(PtFrameworkNode::op_type_key) != attrs.end()), @@ -36,13 +35,13 @@ Output translate_quantized_convnd_base(const NodeContext& context) { // Packed params: weight, bias, stride, padding, dilation, groups auto weight = packed_params[0].get_source_output(); auto bias = packed_params[1].get_source_output(); - auto strides = std::dynamic_pointer_cast(packed_params[2].get_source_output().get_node_shared_ptr()) + auto strides = ov::as_type_ptr(packed_params[2].get_source_output().get_node_shared_ptr()) ->cast_vector(); - auto pads = std::dynamic_pointer_cast(packed_params[3].get_source_output().get_node_shared_ptr()) + auto pads = ov::as_type_ptr(packed_params[3].get_source_output().get_node_shared_ptr()) ->cast_vector(); - auto dilations = std::dynamic_pointer_cast(packed_params[4].get_source_output().get_node_shared_ptr()) + auto dilations = ov::as_type_ptr(packed_params[4].get_source_output().get_node_shared_ptr()) ->cast_vector(); - int64_t groups = std::dynamic_pointer_cast(packed_params[5].get_source_output().get_node_shared_ptr()) + int64_t groups = ov::as_type_ptr(packed_params[5].get_source_output().get_node_shared_ptr()) ->cast_vector()[0]; auto pad_type = ov::op::PadType::EXPLICIT; diff --git a/src/frontends/pytorch/src/op/quantized_linear.cpp 
b/src/frontends/pytorch/src/op/quantized_linear.cpp index 609f33708f2c9c..3a3ac52d14b059 100644 --- a/src/frontends/pytorch/src/op/quantized_linear.cpp +++ b/src/frontends/pytorch/src/op/quantized_linear.cpp @@ -18,8 +18,7 @@ OutputVector translate_quantized_linear(const NodeContext& context) { // int Y_zero_point_i) -> Tensor Y" num_inputs_check(context, 4, 4); auto x = context.get_input(0); - auto packed_params_node = - std::dynamic_pointer_cast(context.get_input(1).get_node_shared_ptr()); + auto packed_params_node = ov::as_type_ptr(context.get_input(1).get_node_shared_ptr()); PYTORCH_OP_CONVERSION_CHECK(packed_params_node, "Packed params input node type is required to be FrameworkNode."); const auto& attrs = packed_params_node->get_attrs(); PYTORCH_OP_CONVERSION_CHECK((attrs.find(PtFrameworkNode::op_type_key) != attrs.end()), diff --git a/src/frontends/pytorch/src/op/rand.cpp b/src/frontends/pytorch/src/op/rand.cpp index 0779bf2bbcfaa8..cef77ee5811093 100644 --- a/src/frontends/pytorch/src/op/rand.cpp +++ b/src/frontends/pytorch/src/op/rand.cpp @@ -81,8 +81,7 @@ OutputVector translate_rand(const NodeContext& context) { dtype_id = 2; } if (!context.input_is_none(dtype_id)) { - if (std::dynamic_pointer_cast( - context.get_input_from_visible_context(dtype_id).get_node_shared_ptr())) { + if (ov::as_type_ptr(context.get_input_from_visible_context(dtype_id).get_node_shared_ptr())) { dtype = convert_dtype(context.const_input(dtype_id)); low = context.mark_node(std::make_shared(low, dtype)); high = context.mark_node(std::make_shared(high, dtype)); @@ -121,7 +120,7 @@ OutputVector translate_rand_like(const NodeContext& context) { bool dtype_applied = true; Output convert_like_out; if (!context.input_is_none(1)) { - if (std::dynamic_pointer_cast(context.get_input_from_visible_context(1).get_node_shared_ptr())) { + if (ov::as_type_ptr(context.get_input_from_visible_context(1).get_node_shared_ptr())) { dtype = convert_dtype(context.const_input(1)); low = context.mark_node(std::make_shared(low, dtype)); high = context.mark_node(std::make_shared(high, dtype)); @@ -177,8 +176,7 @@ OutputVector translate_randn(const NodeContext& context) { bool dtype_applied = true; Output convert_like_out; if (!context.input_is_none(dtype_id)) { - if (std::dynamic_pointer_cast( - context.get_input_from_visible_context(dtype_id).get_node_shared_ptr())) { + if (ov::as_type_ptr(context.get_input_from_visible_context(dtype_id).get_node_shared_ptr())) { dtype = convert_dtype(context.const_input(dtype_id)); } else if (const auto& fw_node = cast_fw_node(context.get_input(static_cast(dtype_id)).get_node_shared_ptr(), @@ -219,7 +217,7 @@ OutputVector translate_randn_like(const NodeContext& context) { bool dtype_applied = true; Output convert_like_out; if (!context.input_is_none(1)) { - if (std::dynamic_pointer_cast(context.get_input_from_visible_context(1).get_node_shared_ptr())) { + if (ov::as_type_ptr(context.get_input_from_visible_context(1).get_node_shared_ptr())) { dtype = convert_dtype(context.const_input(1)); } else if (const auto& fw_node = cast_fw_node(context.get_input(static_cast(1)).get_node_shared_ptr(), "prim::dtype")) { @@ -250,7 +248,7 @@ OutputVector translate_randint(const NodeContext& context) { bool dtype_applied = true; Output convert_like_out; if (!context.input_is_none(3)) { - if (std::dynamic_pointer_cast(context.get_input_from_visible_context(3).get_node_shared_ptr())) { + if (ov::as_type_ptr(context.get_input_from_visible_context(3).get_node_shared_ptr())) { dtype = 
convert_dtype(context.const_input(3)); } else if (const auto& fw_node = cast_fw_node(context.get_input(static_cast(3)).get_node_shared_ptr(), "prim::dtype")) { @@ -325,8 +323,7 @@ OutputVector translate_normal(const NodeContext& context) { Output convert_like_out; bool dtype_applied = true; if (!context.input_is_none(4)) { - if (std::dynamic_pointer_cast( - context.get_input_from_visible_context(3).get_node_shared_ptr())) { + if (ov::as_type_ptr(context.get_input_from_visible_context(3).get_node_shared_ptr())) { dtype = convert_dtype(context.const_input(4)); } else if (const auto& fw_node = cast_fw_node(context.get_input(3).get_node_shared_ptr(), "prim::dtype")) { convert_like_out = fw_node->input_value(0); diff --git a/src/frontends/pytorch/src/op/repeat_interleave.cpp b/src/frontends/pytorch/src/op/repeat_interleave.cpp index 79606417173a1d..b7bcb58ea0378a 100644 --- a/src/frontends/pytorch/src/op/repeat_interleave.cpp +++ b/src/frontends/pytorch/src/op/repeat_interleave.cpp @@ -48,7 +48,7 @@ OutputVector translate_repeat_interleave(const NodeContext& context) { std::shared_ptr result; auto repeats_ext_node = context.get_input_from_visible_context(1).get_node_shared_ptr(); - auto repeats_fw_node = std::dynamic_pointer_cast(repeats_ext_node); + auto repeats_fw_node = ov::as_type_ptr(repeats_ext_node); if (repeats_fw_node && repeats_fw_node->cast_vector().size() > 1) { // repeats is Constant with more then 1 element auto repeats = repeats_fw_node->cast_vector(); diff --git a/src/frontends/pytorch/src/op/to.cpp b/src/frontends/pytorch/src/op/to.cpp index 796dde380f861b..9d6525253d8c7a 100644 --- a/src/frontends/pytorch/src/op/to.cpp +++ b/src/frontends/pytorch/src/op/to.cpp @@ -23,7 +23,7 @@ OutputVector translate_to(const NodeContext& context) { // -> (Tensor(a)) dtype_idx = 1; auto node = context.get_input_from_visible_context(dtype_idx).get_node_shared_ptr(); - auto fw_node = std::dynamic_pointer_cast(node); + auto fw_node = ov::as_type_ptr(node); if (fw_node && fw_node->get_op_type() == "prim::device") { // Cast only to device without changing dtype. Return input node unchanged. return {context.get_input(0)}; @@ -66,12 +66,12 @@ OutputVector translate_to(const NodeContext& context) { // memory_format sets the desired memory format of returned Tensor. // memory format is ignored since it changes strides of a tensor. 
In openvino tensors are always contigious auto dtype_ext_node = context.get_input_from_visible_context(dtype_idx).get_node_shared_ptr(); - auto dtype_fw_node = std::dynamic_pointer_cast(dtype_ext_node); + auto dtype_fw_node = ov::as_type_ptr(dtype_ext_node); Output cast; if (dtype_fw_node && dtype_fw_node->get_op_type() == "prim::dtype") { auto type_input = dtype_fw_node->input_value(0); cast = context.mark_node(std::make_shared(context.get_input(0), type_input)); - } else if (const auto dtype_const = std::dynamic_pointer_cast(dtype_ext_node)) { + } else if (const auto dtype_const = ov::as_type_ptr(dtype_ext_node)) { auto pt_type = dtype_const->cast_vector()[0]; auto dtype = convert_dtype(pt_type); cast = context.mark_node(std::make_shared(context.get_input(0), dtype)); diff --git a/src/frontends/pytorch/src/transforms/aten_cat_replacer.cpp b/src/frontends/pytorch/src/transforms/aten_cat_replacer.cpp index a627db1c1187e3..692cac207034f0 100644 --- a/src/frontends/pytorch/src/transforms/aten_cat_replacer.cpp +++ b/src/frontends/pytorch/src/transforms/aten_cat_replacer.cpp @@ -64,7 +64,7 @@ AtenCatToConcat::AtenCatToConcat() { } std::shared_ptr input_node = cat->get_input_node_shared_ptr(0); - if (auto loop = std::dynamic_pointer_cast(input_node)) { + if (auto loop = ov::as_type_ptr(input_node)) { // case when concatenation is done inside the Loop auto body = loop->get_function(); auto output_index = cat->input(0).get_source_output().get_index(); @@ -84,7 +84,7 @@ AtenCatToConcat::AtenCatToConcat() { "::cat unsupported case: aten::append wasn't found inside prim::Loop body."); return false; } - auto param = std::dynamic_pointer_cast(append->get_input_node_shared_ptr(0)); + auto param = ov::as_type_ptr(append->get_input_node_shared_ptr(0)); if (!param) { add_exception_to_fw_node( cat, diff --git a/src/frontends/pytorch/src/transforms/aten_stack_list_construct_replacer.cpp b/src/frontends/pytorch/src/transforms/aten_stack_list_construct_replacer.cpp index bbaa1d768bc971..1c9aa1e9911077 100644 --- a/src/frontends/pytorch/src/transforms/aten_stack_list_construct_replacer.cpp +++ b/src/frontends/pytorch/src/transforms/aten_stack_list_construct_replacer.cpp @@ -37,7 +37,7 @@ AtenStackListConstructReplacer::AtenStackListConstructReplacer() { const auto& pattern_map = m.get_pattern_value_map(); const auto& input_node = pattern_map.at(list_construct).get_node_shared_ptr(); auto axis_node = pattern_map.at(axis).get_node_shared_ptr(); - auto axis_const = std::dynamic_pointer_cast(axis_node); + auto axis_const = ov::as_type_ptr(axis_node); auto axis = axis_const->cast_vector(); if (axis.size() != 1) { add_exception_to_fw_node(stack, "aten::stack has multiple axes, only one is supported."); diff --git a/src/frontends/pytorch/src/transforms/dict_resolver.cpp b/src/frontends/pytorch/src/transforms/dict_resolver.cpp index d301e6b5553b14..25d5d3ba603cc5 100644 --- a/src/frontends/pytorch/src/transforms/dict_resolver.cpp +++ b/src/frontends/pytorch/src/transforms/dict_resolver.cpp @@ -31,8 +31,8 @@ bool DictParameterResolver::run_on_model(const std::shared_ptr& model) { for (const auto inp : targets) { const auto getitem_node = cast_fw_node(inp.get_node()->shared_from_this(), "aten::__getitem__"); if (getitem_node) { - const auto index_node = std::dynamic_pointer_cast( - getitem_node->get_input_node_shared_ptr(1)); + const auto index_node = + ov::as_type_ptr(getitem_node->get_input_node_shared_ptr(1)); if (!index_node) { at_least_one_unused = true; continue; @@ -85,7 +85,7 @@ bool 
DictResultResolver::run_on_model(const std::shared_ptr& model) { for (size_t i = 0; i < inputs.size(); i += 2) { auto new_output = inputs.at(i + 1); const auto& name_node = inputs.at(i); - auto fw_node = std::dynamic_pointer_cast(name_node.get_node_shared_ptr()); + auto fw_node = ov::as_type_ptr(name_node.get_node_shared_ptr()); if (!fw_node) { add_exception_to_fw_node( dict_construct_node, diff --git a/src/frontends/pytorch/src/transforms/irfftn_complex_replacer.cpp b/src/frontends/pytorch/src/transforms/irfftn_complex_replacer.cpp index 99aa253a9478e6..cb80987e4511ae 100644 --- a/src/frontends/pytorch/src/transforms/irfftn_complex_replacer.cpp +++ b/src/frontends/pytorch/src/transforms/irfftn_complex_replacer.cpp @@ -116,8 +116,8 @@ IRFFTNComplexReplacer::IRFFTNComplexReplacer() { // Handle norm parameter indicating normalization mode to use. Defaults to "backward". std::string norm; - if (const auto& fw_node_mode = std::dynamic_pointer_cast( - irfftn_op->input_value(3).get_node_shared_ptr())) { + if (const auto& fw_node_mode = + ov::as_type_ptr(irfftn_op->input_value(3).get_node_shared_ptr())) { const auto& attrs = fw_node_mode->get_attrs(); if (attrs.find("string_value") != attrs.end()) { norm = attrs.at("string_value"); diff --git a/src/frontends/pytorch/src/transforms/prim_list_unpack_replacer.cpp b/src/frontends/pytorch/src/transforms/prim_list_unpack_replacer.cpp index 35d5df54fe4d71..2240eec03c1251 100644 --- a/src/frontends/pytorch/src/transforms/prim_list_unpack_replacer.cpp +++ b/src/frontends/pytorch/src/transforms/prim_list_unpack_replacer.cpp @@ -305,7 +305,7 @@ PrimListUnpackReplacer::PrimListUnpackReplacer() { copy_runtime_info_and_name(list_unpack, rg.get(), {input_node, meshgrid_input_node}); replace_node(list_unpack, outputs); return true; - } else if (auto shape_of = std::dynamic_pointer_cast(input_node)) { + } else if (auto shape_of = ov::as_type_ptr(input_node)) { // case aten::size as input // Number of ListUnpack outputs should be equal to rank of input shape. auto axis_0 = v0::Constant::create(element::i32, Shape{}, {0}); @@ -321,7 +321,7 @@ PrimListUnpackReplacer::PrimListUnpackReplacer() { replace_node(list_unpack, res); return true; - } else if (auto slice = std::dynamic_pointer_cast(input_node)) { + } else if (auto slice = ov::as_type_ptr(input_node)) { // case aten::slice as input // Number of ListUnpack outputs should be equal to rank of input shape. 
auto axis_0 = v0::Constant::create(element::i32, Shape{}, {0}); diff --git a/src/frontends/pytorch/src/transforms/remove_packing_ops.cpp b/src/frontends/pytorch/src/transforms/remove_packing_ops.cpp index 125ddc29f16824..463e6ec7eb8895 100644 --- a/src/frontends/pytorch/src/transforms/remove_packing_ops.cpp +++ b/src/frontends/pytorch/src/transforms/remove_packing_ops.cpp @@ -116,7 +116,7 @@ RemovePackingOps::RemovePackingOps() { if (!pack_node) return false; if (as_type_ptr(pack_node)) - pack_node = std::dynamic_pointer_cast(pack_node->input_value(0).get_node_shared_ptr()); + pack_node = ov::as_type_ptr(pack_node->input_value(0).get_node_shared_ptr()); if (!pack_node) return false; diff --git a/src/frontends/pytorch/src/transforms/rfftn_complex_replacer.cpp b/src/frontends/pytorch/src/transforms/rfftn_complex_replacer.cpp index f5b8f8a5f021a4..b90e3121930c71 100644 --- a/src/frontends/pytorch/src/transforms/rfftn_complex_replacer.cpp +++ b/src/frontends/pytorch/src/transforms/rfftn_complex_replacer.cpp @@ -90,8 +90,8 @@ RFFTNComplexReplacer::RFFTNComplexReplacer() { // Handle norm parameter indicating normalization mode to use. Defaults to "backward". std::string norm; - if (const auto& fw_node_mode = std::dynamic_pointer_cast( - rfftn_op->input_value(3).get_node_shared_ptr())) { + if (const auto& fw_node_mode = + ov::as_type_ptr(rfftn_op->input_value(3).get_node_shared_ptr())) { const auto& attrs = fw_node_mode->get_attrs(); if (attrs.find("string_value") != attrs.end()) { norm = attrs.at("string_value"); diff --git a/src/frontends/pytorch/src/transforms/softmax_reshape_elimination.hpp b/src/frontends/pytorch/src/transforms/softmax_reshape_elimination.hpp index 4157364046cf61..40b35954e58eb7 100644 --- a/src/frontends/pytorch/src/transforms/softmax_reshape_elimination.hpp +++ b/src/frontends/pytorch/src/transforms/softmax_reshape_elimination.hpp @@ -19,6 +19,7 @@ namespace pass { */ class SoftmaxReshapeElimination : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("ov::frontend::pytorch::pass::SoftmaxReshapeElimination"); SoftmaxReshapeElimination(); }; diff --git a/src/frontends/pytorch/src/transforms/string_equality_replacer.cpp b/src/frontends/pytorch/src/transforms/string_equality_replacer.cpp index f9a741dedd3996..d378b2e9a27821 100644 --- a/src/frontends/pytorch/src/transforms/string_equality_replacer.cpp +++ b/src/frontends/pytorch/src/transforms/string_equality_replacer.cpp @@ -34,8 +34,7 @@ StringEqualityReplacer::StringEqualityReplacer() { ov::matcher_pass_callback callback = [=](pattern::Matcher& m) { auto& pattern_map = m.get_pattern_value_map(); - auto lhs_node = - std::dynamic_pointer_cast(pattern_map.at(framework_node_lhs).get_node_shared_ptr()); + auto lhs_node = ov::as_type_ptr(pattern_map.at(framework_node_lhs).get_node_shared_ptr()); if (!lhs_node) { return false; } @@ -45,8 +44,7 @@ StringEqualityReplacer::StringEqualityReplacer() { } std::string lhs = lhs_attrs.at("string_value"); - auto rhs_node = - std::dynamic_pointer_cast(pattern_map.at(framework_node_rhs).get_node_shared_ptr()); + auto rhs_node = ov::as_type_ptr(pattern_map.at(framework_node_rhs).get_node_shared_ptr()); if (!rhs_node) { return false; } @@ -57,14 +55,14 @@ StringEqualityReplacer::StringEqualityReplacer() { std::string rhs = rhs_attrs.at("string_value"); auto equal_node = pattern_map.at(equal_op).get_node_shared_ptr(); - if (auto equal = std::dynamic_pointer_cast(equal_node)) { + if (auto equal = ov::as_type_ptr(equal_node)) { auto const_result = 
v0::Constant::create(element::boolean, Shape{}, {lhs == rhs}); copy_runtime_info_and_name(equal_node, {const_result}); replace_node(equal_node, const_result); return true; }; auto not_equal_node = pattern_map.at(not_equal_op).get_node_shared_ptr(); - if (auto equal = std::dynamic_pointer_cast(not_equal_node)) { + if (auto equal = ov::as_type_ptr(not_equal_node)) { auto const_result = v0::Constant::create(element::boolean, Shape{}, {lhs != rhs}); copy_runtime_info_and_name(equal_node, {const_result}); replace_node(equal_node, const_result); diff --git a/src/frontends/pytorch/src/transforms/torchfx_gptq_pattern_replacer.cpp b/src/frontends/pytorch/src/transforms/torchfx_gptq_pattern_replacer.cpp index 730da8f4c20a69..a9101cbd080890 100644 --- a/src/frontends/pytorch/src/transforms/torchfx_gptq_pattern_replacer.cpp +++ b/src/frontends/pytorch/src/transforms/torchfx_gptq_pattern_replacer.cpp @@ -68,18 +68,14 @@ GPTQDecompressionReplacer::GPTQDecompressionReplacer() { } const auto& pattern_map = m.get_pattern_value_map(); auto unsqueeze_1_node = pattern_map.at(unsqueeze_1).get_node_shared_ptr(); - auto unsqueeze_1_in0_const = - std::dynamic_pointer_cast(unsqueeze_1_node->get_input_node_shared_ptr(0)); - auto unsqueeze_1_in1_const = - std::dynamic_pointer_cast(unsqueeze_1_node->get_input_node_shared_ptr(1)); + auto unsqueeze_1_in0_const = ov::as_type_ptr(unsqueeze_1_node->get_input_node_shared_ptr(0)); + auto unsqueeze_1_in1_const = ov::as_type_ptr(unsqueeze_1_node->get_input_node_shared_ptr(1)); auto abs_node = pattern_map.at(abs).get_node_shared_ptr(); - auto abs_in_const = std::dynamic_pointer_cast(abs_node->get_input_node_shared_ptr(0)); + auto abs_in_const = ov::as_type_ptr(abs_node->get_input_node_shared_ptr(0)); auto broadcast_node = pattern_map.at(broadcast).get_node_shared_ptr(); auto unsqueeze_2_node = pattern_map.at(unsqueeze_2).get_node_shared_ptr(); - auto unsqueeze_2_in0_const = - std::dynamic_pointer_cast(unsqueeze_2_node->get_input_node_shared_ptr(0)); - auto unsqueeze_2_in1_const = - std::dynamic_pointer_cast(unsqueeze_2_node->get_input_node_shared_ptr(1)); + auto unsqueeze_2_in0_const = ov::as_type_ptr(unsqueeze_2_node->get_input_node_shared_ptr(0)); + auto unsqueeze_2_in1_const = ov::as_type_ptr(unsqueeze_2_node->get_input_node_shared_ptr(1)); OutputVector outputs_1(unsqueeze_1_node->get_output_size()); OutputVector unsqueeze_1_inputs(2); @@ -110,9 +106,9 @@ GPTQDecompressionReplacer::GPTQDecompressionReplacer() { return false; } const int32_t* rs_in0 = - std::dynamic_pointer_cast(outputs_3[0].get_node_shared_ptr())->get_data_ptr(); + ov::as_type_ptr(outputs_3[0].get_node_shared_ptr())->get_data_ptr(); const int32_t* rs_in1 = - std::dynamic_pointer_cast(outputs_4[0].get_node_shared_ptr())->get_data_ptr(); + ov::as_type_ptr(outputs_4[0].get_node_shared_ptr())->get_data_ptr(); auto shifted_const = std::make_shared(element::i32, outputs_3[0].get_shape()); auto dst = const_cast(reinterpret_cast(shifted_const->get_data_ptr())); if (!dst) @@ -156,8 +152,7 @@ GPTQDecompressionReplacer::GPTQDecompressionReplacer() { } else { auto convert_3_node = pattern_map.at(convert_3).get_node_shared_ptr(); auto convert_4_node = pattern_map.at(convert_4).get_node_shared_ptr(); - auto convert_4_in_const = - std::dynamic_pointer_cast(convert_4_node->get_input_node_shared_ptr(0)); + auto convert_4_in_const = ov::as_type_ptr(convert_4_node->get_input_node_shared_ptr(0)); auto add_node = pattern_map.at(add).get_node_shared_ptr(); OutputVector outputs_5(convert_3_node->get_output_size()); if 
(!convert_3_node->constant_fold(outputs_5, shifted_const->outputs())) { @@ -177,7 +172,7 @@ GPTQDecompressionReplacer::GPTQDecompressionReplacer() { } auto convert_2_node = pattern_map.at(convert_2).get_node_shared_ptr(); - auto convert_2_in_const = std::dynamic_pointer_cast(convert_2_node->get_input_node_shared_ptr(0)); + auto convert_2_in_const = ov::as_type_ptr(convert_2_node->get_input_node_shared_ptr(0)); OutputVector outputs_8(convert_2_node->get_output_size()); if (!convert_2_node->constant_fold(outputs_8, convert_2_in_const->outputs())) { @@ -187,9 +182,9 @@ GPTQDecompressionReplacer::GPTQDecompressionReplacer() { OutputVector outputs_9(bitwise_and->get_output_size()); const int8_t* and_in0 = - std::dynamic_pointer_cast(outputs_7[0].get_node_shared_ptr())->get_data_ptr(); + ov::as_type_ptr(outputs_7[0].get_node_shared_ptr())->get_data_ptr(); const int8_t* and_in1 = - std::dynamic_pointer_cast(outputs_8[0].get_node_shared_ptr())->get_data_ptr(); + ov::as_type_ptr(outputs_8[0].get_node_shared_ptr())->get_data_ptr(); auto masked_const = std::make_shared(element::i8, outputs_7[0].get_shape()); auto masked_dst = const_cast(reinterpret_cast(masked_const->get_data_ptr())); if (!masked_dst) @@ -258,15 +253,14 @@ GPTQMultPatternReplacer::GPTQMultPatternReplacer() { auto reshape3_node = pattern_map.at(reshape_3).get_node_shared_ptr(); // auto mult_node = pattern_map.at(mult).get_node_shared_ptr(); - auto add_input0_const = std::dynamic_pointer_cast(convert_1_node->get_input_node_shared_ptr(0)); + auto add_input0_const = ov::as_type_ptr(convert_1_node->get_input_node_shared_ptr(0)); if (add_input0_const->get_element_type() != element::u4) { return false; } auto add_in0_ptr = add_input0_const->get_data_ptr(); uint32_t add_val = 0; if (convert_2_node) { - auto convert_2_input_const = - std::dynamic_pointer_cast(convert_2_node->get_input_node_shared_ptr(0)); + auto convert_2_input_const = ov::as_type_ptr(convert_2_node->get_input_node_shared_ptr(0)); auto add_in1_ptr = convert_2_input_const->get_data_ptr(); if (!add_in1_ptr) return false; @@ -289,7 +283,7 @@ GPTQMultPatternReplacer::GPTQMultPatternReplacer() { } const auto& static_shape_2 = reshape2_node->get_shape(); - auto reshape2_in0_const = std::dynamic_pointer_cast(convert_4_node->get_input_node_shared_ptr(0)); + auto reshape2_in0_const = ov::as_type_ptr(convert_4_node->get_input_node_shared_ptr(0)); auto sub_replace_const = std::make_shared(reshape2_in0_const->get_element_type(), static_shape_2, reshape2_in0_const->get_data_ptr()); @@ -297,7 +291,7 @@ GPTQMultPatternReplacer::GPTQMultPatternReplacer() { auto new_sub_node = std::make_shared(new_convert_node, add_replace_const); const auto& static_shape_3 = reshape3_node->get_shape(); - auto reshape3_in0_const = std::dynamic_pointer_cast(reshape3_node->get_input_node_shared_ptr(0)); + auto reshape3_in0_const = ov::as_type_ptr(reshape3_node->get_input_node_shared_ptr(0)); auto mult_scale_const = std::make_shared(reshape3_in0_const->get_element_type(), static_shape_3, reshape3_in0_const->get_data_ptr()); diff --git a/src/frontends/pytorch/src/transforms/tuple_unpack_replacer.cpp b/src/frontends/pytorch/src/transforms/tuple_unpack_replacer.cpp index dd9bef56384051..e6993dfb55077b 100644 --- a/src/frontends/pytorch/src/transforms/tuple_unpack_replacer.cpp +++ b/src/frontends/pytorch/src/transforms/tuple_unpack_replacer.cpp @@ -52,7 +52,7 @@ bool TupleUnpackInBodyReplacer::run_on_model(const std::shared_ptr& model if (if_op) { for (size_t i = 1; i < if_op->get_input_size(); i++) { auto input = 
if_op->input_value(i); - auto tuple_construct = std::dynamic_pointer_cast( + auto tuple_construct = ov::as_type_ptr( cast_fw_node(input.get_node_shared_ptr(), "prim::TupleConstruct")); if (!tuple_construct) { continue; diff --git a/src/frontends/pytorch/src/transforms/u4_block_repack.cpp b/src/frontends/pytorch/src/transforms/u4_block_repack.cpp index 3e6e3eac0b5571..4c8bdb04a78039 100644 --- a/src/frontends/pytorch/src/transforms/u4_block_repack.cpp +++ b/src/frontends/pytorch/src/transforms/u4_block_repack.cpp @@ -49,8 +49,7 @@ U4BlockRepack::U4BlockRepack(bool is_symmetrical) { std::make_shared(m_reshape2, "ov::frontend::pytorch::pass::U4BlockRepack"), [=](Matcher& m) { auto& pattern_to_output = m.get_pattern_value_map(); - auto constant = - std::dynamic_pointer_cast(pattern_to_output[m_constant].get_node_shared_ptr()); + auto constant = ov::as_type_ptr(pattern_to_output[m_constant].get_node_shared_ptr()); if (!constant) return false; auto reshape1 = pattern_to_output[m_reshape1].get_node_shared_ptr(); @@ -89,13 +88,13 @@ U4BlockRepack::U4BlockRepack(bool is_symmetrical) { if (reshape_targets.size() != 1) return false; auto convert = reshape_targets.begin()->get_node()->shared_from_this(); - if (!std::dynamic_pointer_cast(convert)) + if (!ov::as_type_ptr(convert)) return false; auto convert_targets = convert->output(0).get_target_inputs(); if (convert_targets.size() != 1) return false; auto subtract = convert_targets.begin()->get_node()->shared_from_this(); - if (!std::dynamic_pointer_cast(subtract)) + if (!ov::as_type_ptr(subtract)) return false; pattern_root = subtract; copy_from.push_back(std::move(convert)); @@ -145,8 +144,7 @@ U4ConvertReshape::U4ConvertReshape() { std::make_shared(m_reshape, "ov::frontend::pytorch::pass::U4ConvertReshape"), [=](Matcher& m) { auto& pattern_to_output = m.get_pattern_value_map(); - auto u4_const = - std::dynamic_pointer_cast(pattern_to_output[m_constant].get_node_shared_ptr()); + auto u4_const = ov::as_type_ptr(pattern_to_output[m_constant].get_node_shared_ptr()); if (!u4_const) return false; @@ -158,15 +156,15 @@ U4ConvertReshape::U4ConvertReshape() { std::shared_ptr new_const; if (pattern_to_output.count(m_constant_8)) { - auto constant_8 = std::dynamic_pointer_cast( - pattern_to_output[m_constant_8].get_node_shared_ptr()); + auto constant_8 = + ov::as_type_ptr(pattern_to_output[m_constant_8].get_node_shared_ptr()); if (ov::shape_size(constant_8->get_output_shape(0)) != 1 || constant_8->get_output_element_type(0).is_real() || constant_8->cast_vector()[0] != 8) return false; if (pattern_to_output.count(m_constant_1)) { - auto constant_1 = std::dynamic_pointer_cast( - pattern_to_output[m_constant_1].get_node_shared_ptr()); + auto constant_1 = + ov::as_type_ptr(pattern_to_output[m_constant_1].get_node_shared_ptr()); if (ov::shape_size(constant_1->get_output_shape(0)) != 1 || constant_1->get_output_element_type(0).is_real() || constant_1->cast_vector()[0] != 1) return false; diff --git a/src/frontends/pytorch/src/utils.cpp b/src/frontends/pytorch/src/utils.cpp index 171445b959eeaa..da0b5c5cd24d61 100644 --- a/src/frontends/pytorch/src/utils.cpp +++ b/src/frontends/pytorch/src/utils.cpp @@ -200,8 +200,7 @@ element::Type convert_dtype(int64_t pt_type) { }; Output apply_dtype(const NodeContext& context, size_t dtype_port, const Output& input_tensor) { - if (std::dynamic_pointer_cast( - context.get_input_from_visible_context(dtype_port).get_node_shared_ptr())) { + if 
diff --git a/src/frontends/pytorch/src/utils.cpp b/src/frontends/pytorch/src/utils.cpp
index 171445b959eeaa..da0b5c5cd24d61 100644
--- a/src/frontends/pytorch/src/utils.cpp
+++ b/src/frontends/pytorch/src/utils.cpp
@@ -200,8 +200,7 @@ element::Type convert_dtype(int64_t pt_type) {
 };
 Output apply_dtype(const NodeContext& context, size_t dtype_port, const Output& input_tensor) {
-    if (std::dynamic_pointer_cast(
-            context.get_input_from_visible_context(dtype_port).get_node_shared_ptr())) {
+    if (ov::as_type_ptr(context.get_input_from_visible_context(dtype_port).get_node_shared_ptr())) {
         auto dtype = convert_dtype(context.const_input(dtype_port));
         return context.mark_node(std::make_shared(input_tensor, dtype));
     } else if (const auto& fw_node =
@@ -373,7 +372,7 @@ OutputVector make_framework_node(const NodeContext& context, const std::string&
 }
 std::shared_ptr cast_fw_node(std::shared_ptr node, const std::string& type) {
-    auto fw_node = std::dynamic_pointer_cast(node);
+    auto fw_node = ov::as_type_ptr(node);
     if (!fw_node) {
         return nullptr;
     }
@@ -386,7 +385,7 @@ std::shared_ptr cast_fw_node(std::shared_ptr
 std::shared_ptr cast_fw_node(std::shared_ptr node, std::initializer_list types) {
-    auto fw_node = std::dynamic_pointer_cast(node);
+    auto fw_node = ov::as_type_ptr(node);
     if (!fw_node) {
         return nullptr;
     }
@@ -410,7 +409,7 @@ std::shared_ptr make_list_construct(const ov::OutputVector& inputs) {
 }
 bool is_none_node(const Output& node) {
-    if (const auto& fw_node_inp = std::dynamic_pointer_cast(node.get_node_shared_ptr())) {
+    if (const auto& fw_node_inp = ov::as_type_ptr(node.get_node_shared_ptr())) {
         const auto& attrs = fw_node_inp->get_attrs();
         if (attrs.find("none_value") != attrs.end()) {
             return true;
@@ -523,7 +522,7 @@ Output get_input_as_i32(const NodeContext& context, size_t idx) {
 Output get_input_concat_if_list(const NodeContext& context, size_t idx) {
     auto x = context.get_input(static_cast(idx));
     if (context.get_input_type(idx).is() &&
-        std::dynamic_pointer_cast(x.get_node_shared_ptr())) {
+        ov::as_type_ptr(x.get_node_shared_ptr())) {
         auto elems = get_list_as_outputs(x, true);
         if (elems.size() == 0)
             // Can we figure real type for empty list?
@@ -562,7 +561,7 @@ std::deque> get_list_as_outputs(const Output& start, bool uns
     auto current_output = start;
     auto zero = v0::Constant::create(element::i32, Shape{}, {0});
     while (const auto& input_fw_node =
-               std::dynamic_pointer_cast(current_output.get_node_shared_ptr())) {
+               ov::as_type_ptr(current_output.get_node_shared_ptr())) {
         const auto& attrs = input_fw_node->get_attrs();
         if (attrs.find(PtFrameworkNode::op_type_key) == attrs.end()) {
             break;
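The cast_fw_node helpers above layer an attribute check on top of the RTTI cast: first prove the node is a framework node, then verify which framework op it wraps. A condensed sketch of that pattern, written against the generic ov::op::util::FrameworkNode interface; the "PtTypeName" key is an assumption for illustration (the PyTorch frontend keeps the real key in PtFrameworkNode::op_type_key):

    #include <memory>
    #include <string>

    #include "openvino/core/type.hpp"
    #include "openvino/op/util/framework_node.hpp"

    // Sketch of the cast_fw_node pattern: cast first, then check the attribute.
    std::shared_ptr<ov::op::util::FrameworkNode> cast_if_fw_type(const std::shared_ptr<ov::Node>& node,
                                                                 const std::string& type) {
        auto fw_node = ov::as_type_ptr<ov::op::util::FrameworkNode>(node);
        if (!fw_node)
            return nullptr;  // not a framework node at all
        const auto& attrs = fw_node->get_attrs();
        auto it = attrs.find("PtTypeName");  // assumed key, see lead-in
        return (it != attrs.end() && it->second == type) ? fw_node : nullptr;
    }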
diff --git a/src/frontends/pytorch/src/utils_quantize.cpp b/src/frontends/pytorch/src/utils_quantize.cpp
index 1e47573f7e9c36..76d5dc36a77af5 100644
--- a/src/frontends/pytorch/src/utils_quantize.cpp
+++ b/src/frontends/pytorch/src/utils_quantize.cpp
@@ -212,7 +212,7 @@ Output quantize_fx(const NodeContext& context,
 }
 std::shared_ptr cast_quantized_fw_node(std::shared_ptr node) {
-    auto quant_node = std::dynamic_pointer_cast(node);
+    auto quant_node = ov::as_type_ptr(node);
     if (!quant_node) {
         return nullptr;
     }
@@ -232,7 +232,7 @@ std::shared_ptr u4_compression_stack(const OutputVector& list_elems, int64
     auto bitwise_and_candidate = list_elems[0].get_node_shared_ptr();
     std::shared_ptr bitwise_and = cast_fw_node(bitwise_and_candidate, "aten::bitwise_and");
     if (!bitwise_and) {
-        bitwise_and = std::dynamic_pointer_cast(bitwise_and_candidate);
+        bitwise_and = ov::as_type_ptr(bitwise_and_candidate);
         if (!bitwise_and)
             return nullptr;
     }
@@ -242,9 +242,8 @@ std::shared_ptr u4_compression_stack(const OutputVector& list_elems, int64
     if (!bitwise_shift)
         return nullptr;
-    auto weights_u8 = std::dynamic_pointer_cast(bitwise_and->get_input_node_shared_ptr(0));
-    auto weights_u8_bitwise_shift =
-        std::dynamic_pointer_cast(bitwise_shift->get_input_node_shared_ptr(0));
+    auto weights_u8 = ov::as_type_ptr(bitwise_and->get_input_node_shared_ptr(0));
+    auto weights_u8_bitwise_shift = ov::as_type_ptr(bitwise_shift->get_input_node_shared_ptr(0));
     if (weights_u8->get_data_ptr() != weights_u8_bitwise_shift->get_data_ptr())
         return nullptr;
diff --git a/src/frontends/tensorflow/src/frontend.cpp b/src/frontends/tensorflow/src/frontend.cpp
index b276d2b5a4ed93..af609088679e14 100644
--- a/src/frontends/tensorflow/src/frontend.cpp
+++ b/src/frontends/tensorflow/src/frontend.cpp
@@ -66,7 +66,7 @@ void get_unsupported_operations_and_failures(const std::shared_ptr& model
                                              std::set& unsupported_operations,
                                              std::unordered_map& failures) {
     for (const auto& node : model->get_ordered_ops()) {
-        if (const auto& internal_op = std::dynamic_pointer_cast(node)) {
+        if (const auto& internal_op = ov::as_type_ptr(node)) {
             // handle internal operations separately
             // which can have elaborated reason of unconverted operation
             // like Const of string type
@@ -546,7 +546,7 @@ std::shared_ptr FrontEnd::decode(const ov::frontend::InputModel::Ptr&
 void FrontEnd::convert(const std::shared_ptr& partiallyConverted) const {
     for (const auto& node : partiallyConverted->get_ordered_ops()) {
         if (ov::is_type(node)) {
-            translate_framework_node(std::dynamic_pointer_cast(node), m_op_translators);
+            translate_framework_node(ov::as_type_ptr(node), m_op_translators);
         }
     }
     for (const auto& result : partiallyConverted->get_results()) {
diff --git a/src/frontends/tensorflow/src/transformations/uninitialized_variable_resolve.cpp b/src/frontends/tensorflow/src/transformations/uninitialized_variable_resolve.cpp
index 6c268f77b910ce..9cedaff7bf06a5 100644
--- a/src/frontends/tensorflow/src/transformations/uninitialized_variable_resolve.cpp
+++ b/src/frontends/tensorflow/src/transformations/uninitialized_variable_resolve.cpp
@@ -20,7 +20,7 @@ ov::frontend::tensorflow::pass::UninitializedVariableResolver::UninitializedVari
     matcher_pass_callback callback = [=](pattern::Matcher& m) {
         NodeRegistry rg;
-        auto unitialized_hash_table = dynamic_pointer_cast(m.get_match_root());
+        auto unitialized_hash_table = ov::as_type_ptr(m.get_match_root());
         if (!unitialized_hash_table) {
             return false;
         }
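The matcher passes above (UninitializedVariableResolver here, TensorArrayV3Replacer and the tensor-list passes further down) all open the same way: re-cast the match root to the pattern's anchor type and bail out when it does not hold. A self-contained sketch of the shape of such a pass, with Relu standing in for HashTable and friends (all names illustrative):

    #include <memory>

    #include "openvino/core/type.hpp"
    #include "openvino/op/relu.hpp"
    #include "openvino/pass/graph_rewrite.hpp"
    #include "openvino/pass/pattern/op/wrap_type.hpp"

    // Sketch: a matcher pass that re-checks its root with as_type_ptr.
    class ExampleResolver : public ov::pass::MatcherPass {
    public:
        ExampleResolver() {
            auto root = ov::pass::pattern::wrap_type<ov::op::v0::Relu>();
            ov::matcher_pass_callback callback = [=](ov::pass::pattern::Matcher& m) {
                auto relu = ov::as_type_ptr<ov::op::v0::Relu>(m.get_match_root());
                if (!relu)
                    return false;  // root is not the expected type: nothing to rewrite
                // ... build and apply the replacement here ...
                return true;
            };
            register_matcher(std::make_shared<ov::pass::pattern::Matcher>(root, "ExampleResolver"), callback);
        }
    };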
diff --git a/src/frontends/tensorflow/src/translate_session.cpp b/src/frontends/tensorflow/src/translate_session.cpp
index 3004d4953d5c53..efac0d96e9880b 100644
--- a/src/frontends/tensorflow/src/translate_session.cpp
+++ b/src/frontends/tensorflow/src/translate_session.cpp
@@ -529,7 +529,7 @@ void TranslateSession::translate_graph(const ov::frontend::InputModel::Ptr& inpu
             const auto& input_outputs_vector = ov_tensors_map->at(producer_name);
             if (input_outputs_vector.size() <= producer_port_idx) {
                 auto producer_node = input_outputs_vector[0].port.get_node_shared_ptr();
-                if (std::dynamic_pointer_cast(producer_node)) {
+                if (ov::as_type_ptr(producer_node)) {
                     // FrameworkNode node does not know in advance how many output ports will be used
                     // so we can increase number of outputs by demand
                     producer_node->set_output_type(producer_port_idx, element::dynamic, PartialShape::dynamic());
@@ -583,13 +583,13 @@ void TranslateSession::translate_graph(const ov::frontend::InputModel::Ptr& inpu
             // We can't add all Sink operations to sinks vector, as there can be a FrameworkNode,
             // which we might need to remove from graph
             if (ov::as_type_ptr(node)) {
-                sinks.push_back(std::dynamic_pointer_cast(node));
+                sinks.push_back(ov::as_type_ptr(node));
             } else {
-                auto multi_subgraph = std::dynamic_pointer_cast(node);
+                auto multi_subgraph = ov::as_type_ptr(node);
                 if (multi_subgraph) {
                     for (const auto& body_model : multi_subgraph->get_functions()) {
                         if (body_model->get_sinks().size()) {
-                            sinks.push_back(std::dynamic_pointer_cast(multi_subgraph));
+                            sinks.push_back(ov::as_type_ptr(multi_subgraph));
                             break;
                         }
                     }
@@ -738,7 +738,7 @@ void TranslateSession::translate_graph(const ov::frontend::InputModel::Ptr& inpu
         for (size_t output_ind = 0; output_ind < node_output_vector.second.size(); ++output_ind) {
             auto output = node_output_vector.second[output_ind].port;
             if (output.get_target_inputs().empty() &&
-                !std::dynamic_pointer_cast(output.get_node_shared_ptr())) {
+                !ov::as_type_ptr(output.get_node_shared_ptr())) {
                 auto model_output_name =
                     output.get_node_shared_ptr()->get_friendly_name() + ":" + std::to_string(output_ind);
                 auto result_node = std::make_shared(output);
diff --git a/src/frontends/tensorflow/tests/convert_tricky_models.cpp b/src/frontends/tensorflow/tests/convert_tricky_models.cpp
index ffb5ece8a2d2f9..d50e187d2bcfc8 100644
--- a/src/frontends/tensorflow/tests/convert_tricky_models.cpp
+++ b/src/frontends/tensorflow/tests/convert_tricky_models.cpp
@@ -110,7 +110,7 @@ TEST(FrontEndConvertTrickyModels, simple_wide_and_deep) {
         int num_emb_segment_sum = 0;
         for (auto& node : model->get_ordered_ops()) {
-            if (std::dynamic_pointer_cast(node)) {
+            if (ov::as_type_ptr(node)) {
                 ++num_emb_segment_sum;
             }
         }
diff --git a/src/frontends/tensorflow/tests/convert_unsupported.cpp b/src/frontends/tensorflow/tests/convert_unsupported.cpp
index f37c1419854139..bc06ce0dc418a2 100644
--- a/src/frontends/tensorflow/tests/convert_unsupported.cpp
+++ b/src/frontends/tensorflow/tests/convert_unsupported.cpp
@@ -112,7 +112,7 @@ TEST(FrontEndConvertModelTest, test_unsupported_op) {
     ASSERT_THROW(frontEnd->convert(model), OpConversionFailure);
     for (auto& node : model->get_ordered_ops()) {
-        if (node->get_friendly_name() == "relu_0" && dynamic_pointer_cast(node)) {
+        if (node->get_friendly_name() == "relu_0" && ov::as_type_ptr(node)) {
            model->replace_node(node, make_shared(node->input(0).get_source_output()));
        }
    }
diff --git a/src/frontends/tensorflow_common/src/helper_transforms/tensor_array_v3_replacer.cpp b/src/frontends/tensorflow_common/src/helper_transforms/tensor_array_v3_replacer.cpp
index 1e3fa977db8a89..a06832898e3e39 100644
--- a/src/frontends/tensorflow_common/src/helper_transforms/tensor_array_v3_replacer.cpp
+++ b/src/frontends/tensorflow_common/src/helper_transforms/tensor_array_v3_replacer.cpp
@@ -23,7 +23,7 @@ ov::frontend::tensorflow::pass::TensorArrayV3Replacer::TensorArrayV3Replacer() {
     matcher_pass_callback callback = [=](pattern::Matcher& m) {
         NodeRegistry rg;
-        auto tensor_array_v3 = dynamic_pointer_cast(m.get_match_root());
+        auto tensor_array_v3 = ov::as_type_ptr(m.get_match_root());
         if (!tensor_array_v3) {
             return false;
         }
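One detail worth noting in translate_graph above: the Sink branch still calls the cast twice, once as a type test and once to produce the stored pointer. Because ov::as_type_ptr already yields an empty pointer on mismatch, the two calls could collapse into one; a possible simplification, not something this patch changes:

    #include <memory>

    #include "openvino/core/type.hpp"
    #include "openvino/op/sink.hpp"

    // Sketch: cast once and reuse the result instead of test-then-cast.
    void collect_sink(const std::shared_ptr<ov::Node>& node, ov::SinkVector& sinks) {
        if (auto sink = ov::as_type_ptr<ov::op::Sink>(node)) {
            sinks.push_back(sink);
        }
    }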
diff --git a/src/frontends/tensorflow_common/src/helper_transforms/tensor_list_ops_resolver.cpp b/src/frontends/tensorflow_common/src/helper_transforms/tensor_list_ops_resolver.cpp
index 81eade74e15233..1baff1008f1dae 100644
--- a/src/frontends/tensorflow_common/src/helper_transforms/tensor_list_ops_resolver.cpp
+++ b/src/frontends/tensorflow_common/src/helper_transforms/tensor_list_ops_resolver.cpp
@@ -105,7 +105,7 @@ void update_parameter_to_slice_input(const std::shared_ptr& node,
                                      std::vector& update_param_ids) {
     // select only TensorListGetItem that accepts a tensor list from Parameter node
     // value of Parameter node is unchanged from one iteration to another one in Loop
-    auto tensor_list_get_item = std::dynamic_pointer_cast(node);
+    auto tensor_list_get_item = ov::as_type_ptr(node);
     if (!tensor_list_get_item) {
         return;
     }
@@ -142,7 +142,7 @@ void update_result_to_concat_output(const std::shared_ptr& node,
                                     std::vector& remove_param_ids) {
     // select only TensorListSetItem that accepts a tensor list from Parameter node
     // output of TensorListSetItem goes to Result that is connected with the tensor list by a back edge
-    auto tensor_list_set_item = std::dynamic_pointer_cast(node);
+    auto tensor_list_set_item = ov::as_type_ptr(node);
     if (!tensor_list_set_item) {
         return;
     }
@@ -202,7 +202,7 @@ ov::frontend::tensorflow::pass::TensorListReplacer::TensorListReplacer() {
     matcher_pass_callback callback = [=](pattern::Matcher& m) {
         NodeRegistry rg;
-        auto tensor_list = std::dynamic_pointer_cast(m.get_match_root());
+        auto tensor_list = ov::as_type_ptr(m.get_match_root());
         if (!tensor_list) {
             return false;
         }
@@ -255,7 +255,7 @@ ov::frontend::tensorflow::pass::TensorListSetItemReplacer::TensorListSetItemRepl
     matcher_pass_callback callback = [=](pattern::Matcher& m) {
         NodeRegistry rg;
-        auto tensor_list_set_item = std::dynamic_pointer_cast(m.get_match_root());
+        auto tensor_list_set_item = ov::as_type_ptr(m.get_match_root());
         if (!tensor_list_set_item) {
             return false;
         }
@@ -309,7 +309,7 @@ ov::frontend::tensorflow::pass::TensorListPushBackReplacer::TensorListPushBackRe
     matcher_pass_callback callback = [=](pattern::Matcher& m) {
         NodeRegistry rg;
-        auto tensor_list_push_back = std::dynamic_pointer_cast(m.get_match_root());
+        auto tensor_list_push_back = ov::as_type_ptr(m.get_match_root());
         if (!tensor_list_push_back) {
             return false;
         }
@@ -353,7 +353,7 @@ ov::frontend::tensorflow::pass::TensorListGetItemReplacer::TensorListGetItemRepl
     matcher_pass_callback callback = [=](pattern::Matcher& m) {
         NodeRegistry rg;
-        auto tensor_list_get_item = std::dynamic_pointer_cast(m.get_match_root());
+        auto tensor_list_get_item = ov::as_type_ptr(m.get_match_root());
         if (!tensor_list_get_item) {
             return false;
         }
@@ -491,8 +491,7 @@ ov::frontend::tensorflow::pass::TensorListInLoopOptimization::TensorListInLoopOp
         std::vector update_result_last_iter_ids;
         for (uint64_t result_idx = 0; result_idx < body_results.size(); ++result_idx) {
             const auto& result = body_results[result_idx];
-            auto tensor_list_set_item =
-                std::dynamic_pointer_cast(result->get_input_node_shared_ptr(0));
+            auto tensor_list_set_item = ov::as_type_ptr(result->get_input_node_shared_ptr(0));
             if (!tensor_list_set_item) {
                 continue;
             }
@@ -529,8 +528,7 @@ ov::frontend::tensorflow::pass::TensorListInLoopOptimization::TensorListInLoopOp
                                            update_result_last_iter_ids.end());
         for (auto update_result_idx : all_update_result_ids) {
             const auto& body_result = body_results[update_result_idx];
-            auto tensor_list_set_item =
-                std::dynamic_pointer_cast(body_result->get_input_node_shared_ptr(0));
+            auto tensor_list_set_item = ov::as_type_ptr(body_result->get_input_node_shared_ptr(0));
             FRONT_END_GENERAL_CHECK(tensor_list_set_item,
                                     "[TensorFlow Frontend] internal error: tensor_list_set_item is nullptr in "
                                     "TensorListInLoopOptimization");
@@ -559,7 +557,7 @@ ov::frontend::tensorflow::pass::TensorListInLoopOptimization::TensorListInLoopOp
                                     "TensorListGetItem operation in TensorListInLoopOptimization");
             auto target_input = *(body_param->get_output_target_inputs(0).begin());
             auto tensor_list_get_item =
-                std::dynamic_pointer_cast(target_input.get_node()->shared_from_this());
+                ov::as_type_ptr(target_input.get_node()->shared_from_this());
             FRONT_END_GENERAL_CHECK(tensor_list_get_item,
                                     "[TensorFlow Frontend] internal error: tensor list must have only consumer "
                                     "TensorListGetItem operation in TensorListInLoopOptimization");
diff --git a/src/frontends/tensorflow_lite/src/frontend.cpp b/src/frontends/tensorflow_lite/src/frontend.cpp
index bbf55a0f6f12f6..30cceeeb10b7dc 100644
--- a/src/frontends/tensorflow_lite/src/frontend.cpp
+++ b/src/frontends/tensorflow_lite/src/frontend.cpp
@@ -140,8 +140,7 @@ std::shared_ptr FrontEnd::convert(const ov::frontend::InputModel::Ptr
 void FrontEnd::convert(const std::shared_ptr& partiallyConverted) const {
     for (const auto& node : partiallyConverted->get_ordered_ops()) {
         if (ov::is_type(node)) {
-            translate_framework_node(std::dynamic_pointer_cast(node),
-                                     m_op_translators);
+            translate_framework_node(ov::as_type_ptr(node), m_op_translators);
         }
     }
     for (const auto& result : partiallyConverted->get_results()) {
diff --git a/src/frontends/tests/frontend/shared/include/op_extension.hpp b/src/frontends/tests/frontend/shared/include/op_extension.hpp
index 563a80739ef7e1..791911052eafb0 100644
--- a/src/frontends/tests/frontend/shared/include/op_extension.hpp
+++ b/src/frontends/tests/frontend/shared/include/op_extension.hpp
@@ -18,7 +18,7 @@ struct OpExtensionFEParam {
 class Relu : public ov::op::Op {
 public:
-    OPENVINO_OP("Relu");
+    OPENVINO_OP("Relu", "frontend_test");
     Relu() = default;
     Relu(const ov::Output& arg) : ov::op::Op({arg}) {
diff --git a/src/frontends/tests/frontend/shared/src/cut_specific_model.cpp b/src/frontends/tests/frontend/shared/src/cut_specific_model.cpp
index 53e634e1b1087e..019c05d0108907 100644
--- a/src/frontends/tests/frontend/shared/src/cut_specific_model.cpp
+++ b/src/frontends/tests/frontend/shared/src/cut_specific_model.cpp
@@ -240,7 +240,7 @@ TEST_P(FrontEndCutModelTest, testSetTensorValue) {
         return node->get_friendly_name().find(const_name) != std::string::npos;
     });
     ASSERT_TRUE(const_node_it != ops.end()) << "Name shall exist:" << const_name;
-    auto data = std::dynamic_pointer_cast(*const_node_it)->get_vector();
+    auto data = ov::as_type_ptr(*const_node_it)->get_vector();
     EXPECT_EQ(data.size(), m_param.m_tensorValue.size()) << "Data size must be equal to expected size";
     EXPECT_TRUE(std::equal(data.begin(), data.end(), m_param.m_tensorValue.begin())) << "Data must be equal";
 }
diff --git a/src/frontends/tests/frontend/shared/test_builtin_extensions/builtin_extensions.cpp b/src/frontends/tests/frontend/shared/test_builtin_extensions/builtin_extensions.cpp
index 792ef552907000..09fecb89ad9e90 100644
--- a/src/frontends/tests/frontend/shared/test_builtin_extensions/builtin_extensions.cpp
+++ b/src/frontends/tests/frontend/shared/test_builtin_extensions/builtin_extensions.cpp
@@ -94,7 +94,7 @@ std::map Relu6ToReluTranslatorPaddle(const ov::fr
 class CustomElu : public ov::op::Op {
 public:
-    OPENVINO_OP("CustomElu");
+    OPENVINO_OP("CustomElu", "frontend_test");
     CustomElu() = default;
     CustomElu(const ov::Output& input, float alpha, float beta) : m_alpha{alpha}, m_beta{beta} {
@@ -159,7 +159,7 @@ class CustomElu : public ov::op::Op {
 #    include "openvino/op/relu.hpp"
 class ReluCustom : public ov::op::v0::Relu {
 public:
-    OPENVINO_OP("ReluCustom");
+    OPENVINO_OP("ReluCustom", "frontend_test");
     OPENVINO_FRAMEWORK_MAP(pytorch, "aten::relu");
 };
 #    define PT_EXT \
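The OPENVINO_OP edits above are the one non-mechanical change in this group: the second macro argument gives each test op its own opset/namespace string in its type info. The likely motivation (an inference, not stated in the patch itself) is that once casts resolve through type info rather than compiler RTTI, a bare OPENVINO_OP("Relu") could be confused with the core Relu by name. A sketch of the convention:

    #include "openvino/op/op.hpp"

    // Sketch: "frontend_test" keeps this Relu's type info distinct from
    // ov::op::v0::Relu when is_type/as_type_ptr compare type infos.
    class Relu : public ov::op::Op {
    public:
        OPENVINO_OP("Relu", "frontend_test");
        Relu() = default;
        // ... input-taking constructor, validate_and_infer_types(),
        //     clone_with_new_inputs() as in op_extension.hpp above ...
    };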
diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/utils/model.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/utils/model.hpp
index d2d21da878b3c6..a05723b24b8d34 100644
--- a/src/tests/functional/plugin/conformance/subgraphs_dumper/include/utils/model.hpp
+++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/include/utils/model.hpp
@@ -91,12 +91,12 @@ generate_model(ov::NodeVector& nodes,
                 // cloned_in_node is parameter or constant, it could have only one input
                 ov::replace_output_update_name(cloned_in_node->output(cloned_in_node_out_idx), orig_in_node->output(out_idx));
                 if (ov::op::util::is_parameter(orig_in_node)) {
-                    auto param = std::dynamic_pointer_cast(orig_in_node);
+                    auto param = ov::as_type_ptr(orig_in_node);
                     model_parameters.push_back(param);
                     node_input_info.insert({ orig_in_node->get_friendly_name(),
                                              node_input_info[cloned_in_node_name]});
                 } else if (ov::op::util::is_constant(orig_in_node)) {
-                    auto op_to_replace = std::dynamic_pointer_cast(orig_in_node);
+                    auto op_to_replace = ov::as_type_ptr(orig_in_node);
                     auto param = convert_const_to_param(op_to_replace);
                     if (param != nullptr) {
                         model_parameters.push_back(param);
@@ -104,7 +104,7 @@ generate_model(ov::NodeVector& nodes,
                     node_input_info.insert({ orig_in_node->get_friendly_name(),
                                              node_input_info[cloned_in_node_name]});
                 } else if (ov::op::util::is_sink(cloned_node)) {
-                    model_sinks.push_back(std::dynamic_pointer_cast(cloned_node->shared_from_this()));
+                    model_sinks.push_back(ov::as_type_ptr(cloned_node->shared_from_this()));
                 }
                 filled_input_idx++;
                 // clean up replaced node data
@@ -114,10 +114,10 @@ generate_model(ov::NodeVector& nodes,
                     model_output_nodes.erase(orig_in_node_name);
                 }
             } else if (ov::op::util::is_parameter(cloned_in_node)) {
-                auto param = std::dynamic_pointer_cast(cloned_in_node);
+                auto param = ov::as_type_ptr(cloned_in_node);
                 model_parameters.push_back(param);
             } else if (ov::op::util::is_constant(cloned_in_node)) {
-                auto op_to_replace = std::dynamic_pointer_cast(cloned_in_node);
+                auto op_to_replace = ov::as_type_ptr(cloned_in_node);
                 auto param = convert_const_to_param(op_to_replace);
                 if (param != nullptr) {
                     model_parameters.push_back(param);
@@ -140,7 +140,7 @@ generate_model(ov::NodeVector& nodes,
     for (const auto& out_node_name : model_output_nodes) {
         auto out_node = cloned_node_map[out_node_name.first];
         if (ov::op::util::is_output(out_node)) {
-            model_results.push_back(std::dynamic_pointer_cast(out_node));
+            model_results.push_back(ov::as_type_ptr(out_node));
         } else {
             for (const auto& out_port_id : out_node_name.second) {
                 model_results.push_back(std::make_shared(out_node->output(out_port_id)));
diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/cache/op_cache.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/cache/op_cache.cpp
index f1550853fec90e..e31502f3d98374 100644
--- a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/cache/op_cache.cpp
+++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/cache/op_cache.cpp
@@ -22,28 +22,28 @@ void OpCache::update_cache(const std::shared_ptr& model,
     std::cout << "[ INFO ][ OP CACHE ] Processing model: " << model_path << std::endl;
     size_t model_op_cnt = model->get_ops().size() - model->get_output_size() - model->inputs().size();
     for (const auto& op : model->get_ordered_ops()) {
-        if (std::dynamic_pointer_cast(op) ||
-            std::dynamic_pointer_cast(op) ||
-            std::dynamic_pointer_cast(op) ||
+        if (ov::as_type_ptr(op) ||
+            ov::as_type_ptr(op) ||
+            ov::as_type_ptr(op) ||
             // ReadValue and Assign have to be handled in pair
             // Will be handled as part of 48838
-            std::dynamic_pointer_cast(op) ||
-            std::dynamic_pointer_cast(op)) {
+            ov::as_type_ptr(op) ||
+            ov::as_type_ptr(op)) {
             continue;
         }
         if (extract_body) {
-            if (std::dynamic_pointer_cast(op)) {
-                auto if_op = std::dynamic_pointer_cast(op);
+            if (ov::as_type_ptr(op)) {
+                auto if_op = ov::as_type_ptr(op);
                 for (size_t i = 0; i < if_op->get_internal_subgraphs_size(); i++) {
                     auto if_body = if_op->get_function(i);
                     update_cache(if_body, model_path, extract_body, from_cache);
                 }
-            } else if (std::dynamic_pointer_cast(op)) {
-                auto loop = std::dynamic_pointer_cast(op);
+            } else if (ov::as_type_ptr(op)) {
+                auto loop = ov::as_type_ptr(op);
                 auto loop_body = loop->get_function();
                 update_cache(loop_body, model_path, extract_body, from_cache);
-            } else if (std::dynamic_pointer_cast(op)) {
-                auto ti = std::dynamic_pointer_cast(op);
+            } else if (ov::as_type_ptr(op)) {
+                auto ti = ov::as_type_ptr(op);
                 auto ti_body = ti->get_function();
                 update_cache(ti_body, model_path, extract_body, from_cache);
             }
diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/single_op/convolutions.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/single_op/convolutions.cpp
index c5504b014c49a3..1f95077d285560 100644
--- a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/single_op/convolutions.cpp
+++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/single_op/convolutions.cpp
@@ -47,8 +47,8 @@ bool ConvolutionsMatcher::match_inputs(const std::shared_ptr &node,
     if (!SingleOpMatcher::match_inputs(node, ref)) {
         return false;
     }
-    bool has_groups = std::dynamic_pointer_cast(node) ||
-                      std::dynamic_pointer_cast(node);
+    bool has_groups = ov::as_type_ptr(node) ||
+                      ov::as_type_ptr(node);
     size_t kernel_size_offset = has_groups ? 3 : 2;
     auto ref_weights_shape = ref->get_input_partial_shape(1).get_shape();
     auto cur_weights_shape = node->get_input_partial_shape(1).get_shape();
diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/subgraph/fused_names.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/subgraph/fused_names.cpp
index 7c35658b361098..402d924a24f188 100644
--- a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/subgraph/fused_names.cpp
+++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/subgraph/fused_names.cpp
@@ -207,17 +207,17 @@ FusedNamesExtractor::extract(const std::shared_ptr &model) {
             nodes.push_back(op);
         }
         if (is_extract_body) {
-            if (std::dynamic_pointer_cast(op)) {
+            if (ov::as_type_ptr(op)) {
                 auto ti = ov::as_type_ptr(op);
                 auto ti_body = ti->get_function();
                 auto tmp_res = extract(ti_body);
                 matched_patterns.insert(matched_patterns.end(), tmp_res.begin(), tmp_res.end());
-            } else if (std::dynamic_pointer_cast(op)) {
+            } else if (ov::as_type_ptr(op)) {
                 auto loop = ov::as_type_ptr(op);
                 auto loop_body = loop->get_function();
                 auto tmp_res = extract(loop_body);
                 matched_patterns.insert(matched_patterns.end(), tmp_res.begin(), tmp_res.end());
-            } else if (std::dynamic_pointer_cast(op)) {
+            } else if (ov::as_type_ptr(op)) {
                 auto if_op = ov::as_type_ptr(op);
                 std::vector> bodies;
                 for (size_t i = 0; i < if_op->get_internal_subgraphs_size(); i++) {
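The body-extraction ladders in op_cache.cpp and fused_names.cpp above keep the historical test-then-cast shape. With as_type_ptr the test and the cast can be a single call in a C++17 if-initializer; a sketch of that alternative (a possible cleanup, not what the patch does):

    #include <memory>

    #include "openvino/core/type.hpp"
    #include "openvino/op/if.hpp"
    #include "openvino/op/loop.hpp"
    #include "openvino/op/tensor_iterator.hpp"

    // Sketch: one cast per branch, reusing the typed pointer.
    void visit_bodies(const std::shared_ptr<ov::Node>& op) {
        if (auto if_op = ov::as_type_ptr<ov::op::v8::If>(op)) {
            for (size_t i = 0; i < if_op->get_internal_subgraphs_size(); ++i) {
                auto body = if_op->get_function(i);  // then/else bodies
            }
        } else if (auto loop = ov::as_type_ptr<ov::op::v5::Loop>(op)) {
            auto body = loop->get_function();
        } else if (auto ti = ov::as_type_ptr<ov::op::v0::TensorIterator>(op)) {
            auto body = ti->get_function();
        }
    }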
diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/subgraph/read_value_assign.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/subgraph/read_value_assign.cpp
index e0ab7f29857efa..485b4dd2d98026 100644
--- a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/subgraph/read_value_assign.cpp
+++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/subgraph/read_value_assign.cpp
@@ -24,10 +24,10 @@ ReadValueAssignExtractor::extract(const std::shared_ptr &model) {
     };
     std::map pairs;
     for (auto& node : model->get_ordered_ops()) {
-        if (const auto& assign = std::dynamic_pointer_cast(node)) {
+        if (const auto& assign = ov::as_type_ptr(node)) {
             pairs[assign->get_variable()].cnt_assign++;
             pairs[assign->get_variable()].variable_id = assign->get_variable()->get_info().variable_id;
-        } else if (const auto& read_value = std::dynamic_pointer_cast(node)) {
+        } else if (const auto& read_value = ov::as_type_ptr(node)) {
             pairs[read_value->get_variable()].cnt_read_val++;
             pairs[read_value->get_variable()].rv = read_value;
             pairs[read_value->get_variable()].variable_id = read_value->get_variable()->get_info().variable_id;
@@ -49,7 +49,7 @@ ReadValueAssignExtractor::extract(const std::shared_ptr &model) {
         while (bfs_queue.size() != 0) {
             auto node = bfs_queue.front();
             all_extracted_nodes.push_back(node);
-            if (const auto& assign = std::dynamic_pointer_cast(node)) {
+            if (const auto& assign = ov::as_type_ptr(node)) {
                 if (assign->get_variable()->get_info().variable_id == pair.second.variable_id) {
                     break;
                 }
diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/subgraph/repeat_pattern.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/subgraph/repeat_pattern.cpp
index be0e21a1b3d0d4..0c95ab587e8bfe 100644
--- a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/subgraph/repeat_pattern.cpp
+++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/matchers/subgraph/repeat_pattern.cpp
@@ -306,17 +306,17 @@ RepeatPatternExtractor::find_repeat_patterns(const std::shared_ptr &m
         if (is_extract_body) {
             for (const auto& matched_node_idx : matched_nodes) {
                 const auto& matched_node = ordered_ops[matched_node_idx];
-                if (std::dynamic_pointer_cast(matched_node)) {
+                if (ov::as_type_ptr(matched_node)) {
                     auto ti = ov::as_type_ptr(matched_node);
                     auto ti_body = ti->get_function();
                     auto secondary_patterns = find_repeat_patterns(ti_body, is_save_borders_only);
                     update_extractor_cache(extracted_patterns, secondary_patterns);
-                } else if (std::dynamic_pointer_cast(matched_node)) {
+                } else if (ov::as_type_ptr(matched_node)) {
                     auto loop = ov::as_type_ptr(matched_node);
                     auto loop_body = loop->get_function();
                     auto secondary_patterns = find_repeat_patterns(loop_body, is_save_borders_only);
                     update_extractor_cache(extracted_patterns, secondary_patterns);
-                } else if (std::dynamic_pointer_cast(matched_node)) {
+                } else if (ov::as_type_ptr(matched_node)) {
                     auto if_op = ov::as_type_ptr(matched_node);
                     std::vector> bodies;
                     for (size_t i = 0; i < if_op->get_internal_subgraphs_size(); i++) {
diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/model.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/model.cpp
index 6b77e62873d851..c9e1cd11c3c50c 100644
--- a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/model.cpp
+++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/model.cpp
@@ -22,7 +22,7 @@ get_input_info_by_model(const std::shared_ptr& model) {
         ov::conformance::InputInfo::Range ranges(ov::conformance::DEFAULT_MIN_VALUE, ov::conformance::DEFAULT_MAX_VALUE);
         bool is_const = false;
         if (ov::shape_size(node->get_output_shape(0)) != 0 && ov::op::util::is_constant(node)) {
-            std::shared_ptr constant = std::dynamic_pointer_cast(node);
+            std::shared_ptr constant = ov::as_type_ptr(node);
             auto const_ranges = get_const_ranges(constant,
                                                  constant->get_default_output().get_element_type());
             ranges = const_ranges;
@@ -82,14 +82,14 @@ bool is_same_paired_op_cnt(const std::shared_ptr &fist_model,
     size_t second_paired_op_cnt = 0;
     for (auto& node : fist_model->get_ordered_ops()) {
-        if (std::dynamic_pointer_cast(node) ||
-            std::dynamic_pointer_cast(node))
+        if (ov::as_type_ptr(node) ||
+            ov::as_type_ptr(node))
             fist_paired_op_cnt++;
     }
     for (auto& node : second_model->get_ordered_ops()) {
-        if (std::dynamic_pointer_cast(node) ||
-            std::dynamic_pointer_cast(node))
+        if (ov::as_type_ptr(node) ||
+            ov::as_type_ptr(node))
             second_paired_op_cnt++;
     }
@@ -99,11 +99,11 @@ bool is_same_paired_op_cnt(const std::shared_ptr &fist_model,
 bool build_control_dependency(std::shared_ptr &model) {
     std::map, std::shared_ptr>> dependency_pairs;
     for (auto& node : model->get_ordered_ops()) {
-        if (const auto& read_value = std::dynamic_pointer_cast(node)) {
+        if (const auto& read_value = ov::as_type_ptr(node)) {
             dependency_pairs[read_value->get_variable_id()].first = read_value;
         }
-        if (const auto& assign = std::dynamic_pointer_cast(node)) {
+        if (const auto& assign = ov::as_type_ptr(node)) {
             dependency_pairs[assign->get_variable_id()].second = assign;
         }
     }
@@ -119,4 +119,4 @@ bool build_control_dependency(std::shared_ptr &model) {
     }
 }
 }  // namespace util
-}  // namespace ov
\ No newline at end of file
+}  // namespace ov
diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/node.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/node.cpp
index a59905d4ba8ae9..8d24da58ce1941 100644
--- a/src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/node.cpp
+++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/src/utils/node.cpp
@@ -86,7 +86,7 @@ get_input_info_by_node(const std::shared_ptr& node) {
         }
         ov::conformance::InputInfo in_info(node->get_input_partial_shape(port_id));
         std::string input_name = input_node->get_friendly_name();
-        if (std::dynamic_pointer_cast(input_node)) {
+        if (ov::as_type_ptr(input_node)) {
             if (ov::shape_size(input_node->get_output_shape(0)) == 0) {
                 auto const_node = ov::as_type_ptr(input_node);
                 in_info.is_const = true;
diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/cache/op_cache.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/cache/op_cache.cpp
index ab987a2140a5e1..7d8c1743b2cb74 100644
--- a/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/cache/op_cache.cpp
+++ b/src/tests/functional/plugin/conformance/subgraphs_dumper/tests/cache/op_cache.cpp
@@ -116,10 +116,10 @@ TEST_F(OpCacheUnitTest, update_cache_by_model) {
     // check cache
     ASSERT_EQ(m_ops_cache.size(), 2);
     for (const auto& cached_node : this->m_ops_cache) {
-        ASSERT_TRUE(std::dynamic_pointer_cast(cached_node.first) ||
-                    std::dynamic_pointer_cast(cached_node.first));
+        ASSERT_TRUE(ov::as_type_ptr(cached_node.first) ||
+                    ov::as_type_ptr(cached_node.first));
         auto meta = cached_node.second;
-        if (std::dynamic_pointer_cast(cached_node.first)) {
+        if (ov::as_type_ptr(cached_node.first)) {
             // check model_path
             ASSERT_EQ(meta.get_model_info().size(), 1);
             ASSERT_EQ(meta.get_model_info().begin()->first, test_model_name);
--- a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/utils/generate_static_shapes.cpp
+++ b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/utils/generate_static_shapes.cpp
@@ -50,7 +50,7 @@ namespace {
 InputShape generate(const std::shared_ptr& node,
                     size_t in_port_id) {
-    const auto& param = std::dynamic_pointer_cast(node->get_input_node_shared_ptr(in_port_id));
+    const auto& param = ov::as_type_ptr(node->get_input_node_shared_ptr(in_port_id));
     std::vector staticShapes = { param->get_partial_shape().get_min_shape(),
                                  generate_mid_shape(param->get_partial_shape()),
                                  param->get_partial_shape().get_max_shape() };
diff --git a/src/tests/functional/plugin/shared/include/behavior/compiled_model/compiled_model_base.hpp b/src/tests/functional/plugin/shared/include/behavior/compiled_model/compiled_model_base.hpp
index 74062a8ddb98a9..ffbec42fb1fac0 100644
--- a/src/tests/functional/plugin/shared/include/behavior/compiled_model/compiled_model_base.hpp
+++ b/src/tests/functional/plugin/shared/include/behavior/compiled_model/compiled_model_base.hpp
@@ -353,7 +353,7 @@ TEST_P(OVCompiledModelBaseTestOptional, CheckExecGraphInfoBeforeExecution) {
     }
     int constCnt = 0;
-    std::shared_ptr getFunction = std::dynamic_pointer_cast(execGraph);
+    std::shared_ptr getFunction = ov::as_type_ptr(execGraph);
     ASSERT_NE(getFunction, nullptr);
     for (const auto& op : getFunction->get_ops()) {
@@ -405,7 +405,7 @@ TEST_P(OVCompiledModelBaseTestOptional, CheckExecGraphInfoAfterExecution) {
     int constCnt = 0;
     // Store all the layers from the executable graph information represented as CNNNetwork
     bool hasOpWithValidTime = false;
-    auto getFunction = std::dynamic_pointer_cast(execGraph);
+    auto getFunction = ov::as_type_ptr(execGraph);
     ASSERT_NE(nullptr, getFunction);
     for (const auto& op : getFunction->get_ops()) {
diff --git a/src/tests/functional/shared_test_classes/src/base/utils/calculate_thresholds.cpp b/src/tests/functional/shared_test_classes/src/base/utils/calculate_thresholds.cpp
index 92697e84e91114..a21ec8dd91e7a5 100644
--- a/src/tests/functional/shared_test_classes/src/base/utils/calculate_thresholds.cpp
+++ b/src/tests/functional/shared_test_classes/src/base/utils/calculate_thresholds.cpp
@@ -21,7 +21,7 @@ calculate_thresholds_by_whole_model(const std::shared_ptr& model) {
     // check all operations except convert to generate correct values
     for (const auto& op : model->get_ordered_ops()) {
-        if (std::dynamic_pointer_cast(op)) {
+        if (ov::as_type_ptr(op)) {
             continue;
         }
         // check the default threshold for operations
diff --git a/src/tests/functional/shared_test_classes/src/single_op/comparison.cpp b/src/tests/functional/shared_test_classes/src/single_op/comparison.cpp
index 7fea75f338f0be..0d63044cdb495f 100644
--- a/src/tests/functional/shared_test_classes/src/single_op/comparison.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_op/comparison.cpp
@@ -66,7 +66,7 @@ void ComparisonLayerTest::SetUp() {
     std::shared_ptr second_input;
     if (second_input_type == InputLayerType::PARAMETER) {
         second_input = std::make_shared(model_type, inputDynamicShapes[1]);
-        inputs.push_back(std::dynamic_pointer_cast(second_input));
+        inputs.push_back(ov::as_type_ptr(second_input));
     } else {
         ov::Tensor tensor = ov::test::utils::create_and_fill_tensor(model_type, targetStaticShapes.front()[1]);
         second_input = std::make_shared(tensor);
diff --git a/src/tests/functional/shared_test_classes/src/single_op/reverse_sequence.cpp
b/src/tests/functional/shared_test_classes/src/single_op/reverse_sequence.cpp
index f322aeffb2c62b..a20b59e5113032 100644
--- a/src/tests/functional/shared_test_classes/src/single_op/reverse_sequence.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_op/reverse_sequence.cpp
@@ -47,7 +47,7 @@ void ReverseSequenceLayerTest::SetUp() {
         secondary_input = std::make_shared(tensor);
     } else if (ov::test::utils::InputLayerType::PARAMETER == secondary_input_type) {
         secondary_input = std::make_shared(second_data_type, ov::Shape(second_input_shape));
-        params.push_back(std::dynamic_pointer_cast(secondary_input));
+        params.push_back(ov::as_type_ptr(secondary_input));
     } else {
         throw std::runtime_error("Unsupported input type");
     }
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_backprop_data.cpp b/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_backprop_data.cpp
index b9b122b35b6a69..3208ab16ec4ac1 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_backprop_data.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_backprop_data.cpp
@@ -70,7 +70,7 @@ void QuantConvBackpropDataLayerTest::SetUp() {
     auto weightsFq = ov::test::utils::make_fake_quantize(weightsNode, element_type, quantLevels, weightsFqConstShapes);
-    auto convBackpropData = std::dynamic_pointer_cast(
+    auto convBackpropData = ov::as_type_ptr(
             ov::test::utils::make_convolution_backprop_data(dataFq, weightsFq, element_type, stride, padBegin, padEnd, dilation, padType));
     ov::ResultVector results{std::make_shared(convBackpropData)};
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/quantized_group_convolution.cpp b/src/tests/functional/shared_test_classes/src/subgraph/quantized_group_convolution.cpp
index cb213749d9c997..0e64399cd69494 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/quantized_group_convolution.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/quantized_group_convolution.cpp
@@ -84,7 +84,7 @@ void QuantGroupConvLayerTest::SetUp() {
         weights = weightsNode;
     }
-    auto groupConv = std::dynamic_pointer_cast(
+    auto groupConv = ov::as_type_ptr(
             ov::test::utils::make_group_convolution(dataFq, weights, element_type, stride, padBegin, padEnd, dilation, padType));
     ov::ResultVector results{std::make_shared(groupConv)};
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/quantized_group_convolution_backprop_data.cpp b/src/tests/functional/shared_test_classes/src/subgraph/quantized_group_convolution_backprop_data.cpp
index 45492b04c47302..86ffe543a0bd60 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/quantized_group_convolution_backprop_data.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/quantized_group_convolution_backprop_data.cpp
@@ -77,7 +77,7 @@ void QuantGroupConvBackpropDataLayerTest::SetUp() {
     auto weightsFq = ov::test::utils::make_fake_quantize(weightsNode, element_type, quantLevels, weightsFqConstShapes);
-    auto groupConvBackpropData = std::dynamic_pointer_cast(
+    auto groupConvBackpropData = ov::as_type_ptr(
            ov::test::utils::make_group_convolution_backprop_data(dataFq, weightsFq, element_type, stride, padBegin, padEnd, dilation, padType));
     ov::ResultVector results{std::make_shared(groupConvBackpropData)};
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/elementwise.cpp b/src/tests/ov_helpers/ov_lpt_models/src/elementwise.cpp
index 25dad9a6ba7c29..6d5ac45d5c74c1 100644
---
a/src/tests/ov_helpers/ov_lpt_models/src/elementwise.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/elementwise.cpp
@@ -109,7 +109,7 @@ std::shared_ptr ElementwiseFunction::getOriginalSubgraphWithConvoluti
     result = std::make_shared(result);
     result->set_friendly_name("result");
-    ov::ResultVector results{ std::dynamic_pointer_cast(result) };
+    ov::ResultVector results{ ov::as_type_ptr(result) };
     return std::make_shared(results, ov::ParameterVector{ branch1.first, branch2.first }, "AddTransformation");
 }
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/multiply.cpp b/src/tests/ov_helpers/ov_lpt_models/src/multiply.cpp
index ace041eae73f08..0454a5f2a58889 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/multiply.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/multiply.cpp
@@ -69,10 +69,10 @@ std::shared_ptr MultiplyFunction::get(const ov::element::Type model_p
     ov::ParameterVector inputs;
     if (ov::is_type(branchNodes1.input)) {
-        inputs.push_back(std::dynamic_pointer_cast(branchNodes1.input));
+        inputs.push_back(ov::as_type_ptr(branchNodes1.input));
     }
     if (ov::is_type(branchNodes2.input)) {
-        inputs.push_back(std::dynamic_pointer_cast(branchNodes2.input));
+        inputs.push_back(ov::as_type_ptr(branchNodes2.input));
     }
     return std::make_shared(results, inputs, "MultiplyTransformation");
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/multiply_partial_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/multiply_partial_function.cpp
index 68c4bb8433c1fd..742f3a90aaf0af 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/multiply_partial_function.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/multiply_partial_function.cpp
@@ -69,10 +69,10 @@ std::shared_ptr MultiplyPartialFunction::get(const ov::element::Type
     ov::ParameterVector inputs;
     if (ov::is_type(branchNodes1.input)) {
-        inputs.push_back(std::dynamic_pointer_cast(branchNodes1.input));
+        inputs.push_back(ov::as_type_ptr(branchNodes1.input));
     }
     if (ov::is_type(branchNodes2.input)) {
-        inputs.push_back(std::dynamic_pointer_cast(branchNodes2.input));
+        inputs.push_back(ov::as_type_ptr(branchNodes2.input));
     }
     return std::make_shared(results, inputs, "MultiplyTransformation");
diff --git a/src/tests/test_utils/common_test_utils/src/graph_comparator.cpp b/src/tests/test_utils/common_test_utils/src/graph_comparator.cpp
index 4b159890ddebae..5d361e1e2276db 100644
--- a/src/tests/test_utils/common_test_utils/src/graph_comparator.cpp
+++ b/src/tests/test_utils/common_test_utils/src/graph_comparator.cpp
@@ -579,10 +579,10 @@ class CompareSubGraphs {
     }
     static int64_t get_num_iterations(ov::op::util::SubGraphOp* sub) {
-        if (const auto ti = dynamic_cast(sub)) {
+        if (const auto ti = ov::as_type(sub)) {
             return ti->get_num_iterations();
         }
-        if (const auto l = dynamic_cast(sub)) {
+        if (const auto l = ov::as_type(sub)) {
             return l->get_num_iterations();
         }
@@ -724,8 +724,8 @@ Comparator::Result Comparator::compare(ov::Node* node1, ov::Node* node2, std::os
                                 typeInfoToStr(type_info1) + " != " + typeInfoToStr(type_info2));
     }
-    auto subgraph1 = dynamic_cast(node1);
-    auto subgraph2 = dynamic_cast(node2);
+    auto subgraph1 = ov::as_type(node1);
+    auto subgraph2 = ov::as_type(node2);
     const bool subgraph_nodes = subgraph1 && subgraph2;
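graph_comparator.cpp above also swaps the raw-pointer casts: ov::as_type is the raw-pointer counterpart of ov::as_type_ptr. A small sketch mirroring get_num_iterations (types as in the code above):

    #include "openvino/core/type.hpp"
    #include "openvino/op/loop.hpp"
    #include "openvino/op/tensor_iterator.hpp"
    #include "openvino/op/util/sub_graph_base.hpp"

    // Sketch: as_type works on raw pointers and returns nullptr on mismatch.
    int64_t num_iterations(ov::op::util::SubGraphOp* sub) {
        if (const auto ti = ov::as_type<ov::op::v0::TensorIterator>(sub))
            return ti->get_num_iterations();
        if (const auto loop = ov::as_type<ov::op::v5::Loop>(sub))
            return loop->get_num_iterations();
        return -1;  // unknown subgraph kind
    }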
diff --git a/src/tests/test_utils/common_test_utils/src/ov_test_utils.cpp b/src/tests/test_utils/common_test_utils/src/ov_test_utils.cpp
index 9c52c94a29ca9b..157c0e6628980b 100644
--- a/src/tests/test_utils/common_test_utils/src/ov_test_utils.cpp
+++ b/src/tests/test_utils/common_test_utils/src/ov_test_utils.cpp
@@ -157,7 +157,7 @@ ov::TensorVector infer_on_template(const std::shared_ptr& model,
 bool is_tensor_iterator_exist(const std::shared_ptr& model) {
     const auto& ops = model->get_ops();
     for (const auto& node : ops) {
-        const auto& ti = std::dynamic_pointer_cast(node);
+        const auto& ti = ov::as_type_ptr(node);
         if (ti) {
             return true;
         }
diff --git a/src/tests/test_utils/functional_test_utils/src/summary/op_summary.cpp b/src/tests/test_utils/functional_test_utils/src/summary/op_summary.cpp
index a02645170cbc60..66f11b87e4d345 100644
--- a/src/tests/test_utils/functional_test_utils/src/summary/op_summary.cpp
+++ b/src/tests/test_utils/functional_test_utils/src/summary/op_summary.cpp
@@ -143,9 +143,8 @@ std::map OpSummary::getStatisticFromReport() {
 void OpSummary::updateOPsStats(const std::shared_ptr& model, const PassRate::Statuses& status, double k) {
     bool isFunctionalGraph = false;
     for (const auto& op : model->get_ordered_ops()) {
-        if (!std::dynamic_pointer_cast(op) &&
-            !std::dynamic_pointer_cast(op) &&
-            !std::dynamic_pointer_cast(op)) {
+        if (!ov::as_type_ptr(op) && !ov::as_type_ptr(op) &&
+            !ov::as_type_ptr(op)) {
             // find all features
             isFunctionalGraph = true;
             break;
@@ -153,24 +152,23 @@ void OpSummary::updateOPsStats(const std::shared_ptr& model, const Pa
     }
     for (const auto& op : model->get_ordered_ops()) {
-        if ((std::dynamic_pointer_cast(op) ||
-             std::dynamic_pointer_cast(op) ||
-             std::dynamic_pointer_cast(op)) &&
+        if ((ov::as_type_ptr(op) || ov::as_type_ptr(op) ||
+             ov::as_type_ptr(op)) &&
            isFunctionalGraph) {
            continue;
        }
        if (extractBody) {
-            if (std::dynamic_pointer_cast(op)) {
+            if (ov::as_type_ptr(op)) {
                updateOPsStats(op->get_type_info(), status, k);
                auto ti = ov::as_type_ptr(op);
                auto ti_body = ti->get_function();
                updateOPsStats(ti_body, status, k);
-            } else if (std::dynamic_pointer_cast(op)) {
+            } else if (ov::as_type_ptr(op)) {
                updateOPsStats(op->get_type_info(), status, k);
                auto loop = ov::as_type_ptr(op);
                auto loop_body = loop->get_function();
                updateOPsStats(loop_body, status, k);
-            } else if (std::dynamic_pointer_cast(op)) {
+            } else if (ov::as_type_ptr(op)) {
                updateOPsStats(op->get_type_info(), status, k);
                auto if_op = ov::as_type_ptr(op);
                std::vector> bodies;
@@ -190,26 +188,24 @@ void OpSummary::updateOPsImplStatus(const std::shared_ptr& model, con
     }
     bool isFunctionalGraph = false;
     for (const auto& op : model->get_ordered_ops()) {
-        if (!std::dynamic_pointer_cast(op) &&
-            !std::dynamic_pointer_cast(op) &&
-            !std::dynamic_pointer_cast(op)) {
+        if (!ov::as_type_ptr(op) && !ov::as_type_ptr(op) &&
+            !ov::as_type_ptr(op)) {
            isFunctionalGraph = true;
            break;
        }
    }
    for (const auto& op : model->get_ordered_ops()) {
-        if ((std::dynamic_pointer_cast(op) ||
-             std::dynamic_pointer_cast(op) ||
-             std::dynamic_pointer_cast(op)) &&
+        if ((ov::as_type_ptr(op) || ov::as_type_ptr(op) ||
+             ov::as_type_ptr(op)) &&
            isFunctionalGraph) {
            continue;
-        } else if (std::dynamic_pointer_cast(op)) {
+        } else if (ov::as_type_ptr(op)) {
            updateOPsImplStatus(op->get_type_info(), implStatus);
            auto ti = ov::as_type_ptr(op);
            auto ti_body = ti->get_function();
            updateOPsImplStatus(ti_body, implStatus);
-        } else if (std::dynamic_pointer_cast(op)) {
+        } else if (ov::as_type_ptr(op)) {
            updateOPsImplStatus(op->get_type_info(), implStatus);
            auto loop = ov::as_type_ptr(op);
            auto loop_body = loop->get_function();
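Finally, the isFunctionalGraph filter that updateOPsStats and updateOPsImplStatus both open with reduces to one helper; condensed below as a sketch (the three skipped types are exactly those in the code above):

    #include <memory>

    #include "openvino/core/model.hpp"
    #include "openvino/core/type.hpp"
    #include "openvino/op/constant.hpp"
    #include "openvino/op/parameter.hpp"
    #include "openvino/op/result.hpp"

    // Sketch: a graph is "functional" if any op is not Parameter/Constant/Result.
    bool is_functional_graph(const std::shared_ptr<ov::Model>& model) {
        for (const auto& op : model->get_ordered_ops()) {
            if (!ov::as_type_ptr<ov::op::v0::Parameter>(op) && !ov::as_type_ptr<ov::op::v0::Constant>(op) &&
                !ov::as_type_ptr<ov::op::v0::Result>(op)) {
                return true;
            }
        }
        return false;
    }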