[PT FE] Add aten::rot90 #28224

Open · wants to merge 8 commits into base: master
Changes from 2 commits
67 changes: 67 additions & 0 deletions src/frontends/pytorch/src/op/rot90.cpp
@@ -0,0 +1,67 @@
// Copyright (C) 2018-2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "openvino/frontend/pytorch/node_context.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/transpose.hpp"
#include "utils.hpp"

namespace ov {
namespace frontend {
namespace pytorch {
namespace op {

using namespace ov::op;

OutputVector translate_rot90(const NodeContext& context) {
    num_inputs_check(context, 1, 3);
    auto input = context.get_input(0);
    int64_t k = context.input_is_none(1) ? 1 : context.const_input<int64_t>(1);
    std::vector<int64_t> dims = context.input_is_none(2) ? std::vector<int64_t>{0, 1}
                                                         : context.const_input<std::vector<int64_t>>(2);
    const auto& partial_shape = input.get_partial_shape();
    // Guard against dynamic rank before calling get_length().
    PYTORCH_OP_CONVERSION_CHECK(partial_shape.rank().is_static(),
                                "aten::rot90: expected input with static rank");
    const auto ndims = partial_shape.rank().get_length();

[Contributor review comment] Please check that rank is static before get_length.

    PYTORCH_OP_CONVERSION_CHECK(dims.size() == 2,
                                "Expected total rotation dims == 2, but got dims = ",
                                dims.size());
    PYTORCH_OP_CONVERSION_CHECK(ndims >= 2,
                                "Expected total dims >= 2, but got total dims = ",
                                ndims);
    PYTORCH_OP_CONVERSION_CHECK(dims[0] != dims[1],
                                "Rotation dimensions must be different, but got dim0 = ",
                                dims[0], " and dim1 = ", dims[1]);

    // Normalize possibly negative rotation dims into [0, ndims).
    for (auto& dim : dims) {
        dim = (dim + ndims) % ndims;
    }

    // torch.rot90 also accepts negative k; normalize into [0, 3].
    k = ((k % 4) + 4) % 4;
    Output<Node> rotated;

    if (k == 1 || k == 3) {
        // A quarter rotation is a flip along one of the two dims followed by a
        // transpose that swaps them; which dim is flipped selects the direction.
        int64_t flip_dim = (k == 1) ? dims[1] : dims[0];
        auto flip_dims = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {flip_dim}));
        auto flipped = create_flip(input, flip_dims);
        std::vector<int64_t> perm_values(ndims);
        std::iota(perm_values.begin(), perm_values.end(), 0);
        std::swap(perm_values[dims[0]], perm_values[dims[1]]);
        auto perm =
            context.mark_node(v0::Constant::create(element::i32, Shape{static_cast<size_t>(ndims)}, perm_values));
        rotated = context.mark_node(std::make_shared<v1::Transpose>(flipped, perm));
    } else if (k == 2) {
        // A half rotation is just a flip along both dims; no transpose is needed.
        auto flip_dims = context.mark_node(v0::Constant::create(element::i32, Shape{dims.size()}, dims));
        rotated = create_flip(input, flip_dims);
    } else {
        // k % 4 == 0: the rotation is the identity.
        rotated = input;
    }

    return {rotated};
}

} // namespace op
} // namespace pytorch
} // namespace frontend
} // namespace ov
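Note on the decomposition used above: for k = 1 or k = 3, rot90 reduces to one flip along one of the two rotation dims followed by a transpose that swaps them, and k = 2 reduces to a flip along both dims. The sketch below is not part of the PR; it replays the same branch structure in numpy and checks it against np.rot90 for every k and dims pair the tests exercise (the helper name rot90_decomposed is illustrative only).

import numpy as np

def rot90_decomposed(x, k=1, dims=(0, 1)):
    # Same branches as translate_rot90: flip + transpose for k in {1, 3},
    # a double flip for k == 2, identity otherwise.
    k = k % 4  # Python's % is already non-negative for negative k
    d0, d1 = dims
    perm = list(range(x.ndim))
    perm[d0], perm[d1] = perm[d1], perm[d0]
    if k == 1:
        return np.flip(x, d1).transpose(perm)
    if k == 2:
        return np.flip(x, (d0, d1))
    if k == 3:
        return np.flip(x, d0).transpose(perm)
    return x

x = np.arange(24).reshape(2, 3, 4)
for k in range(-4, 6):
    for dims in [(0, 1), (0, 2), (1, 2)]:
        assert np.array_equal(rot90_decomposed(x, k, dims), np.rot90(x, k, axes=dims))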
2 changes: 2 additions & 0 deletions src/frontends/pytorch/src/op_table.cpp
@@ -200,6 +200,7 @@ OP_CONVERTER(translate_reshape_as);
OP_CONVERTER(translate_rnn);
OP_CONVERTER(translate_roi_align);
OP_CONVERTER(translate_roll);
OP_CONVERTER(translate_rot90);
OP_CONVERTER(translate_round);
OP_CONVERTER(translate_rsqrt);
OP_CONVERTER(translate_rsub);
@@ -624,6 +625,7 @@ const std::unordered_map<std::string, CreatorFunction> get_supported_ops_ts() {
{"aten::rnn_relu", op::translate_rnn},
{"aten::rnn_tanh", op::translate_rnn},
{"aten::roll", op::translate_roll},
{"aten::rot90", op::translate_rot90},
{"aten::round", op::translate_round},
{"aten::rsqrt", op::optional_out<op::translate_rsqrt, 1>},
{"aten::rsqrt_", op::inplace_op<op::translate_rsqrt>},
38 changes: 38 additions & 0 deletions tests/layer_tests/pytorch_tests/test_rot90.py
@@ -0,0 +1,38 @@
# Copyright (C) 2018-2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import pytest
import numpy as np

from pytorch_layer_test_class import PytorchLayerTest


class TestRot90(PytorchLayerTest):
    def _prepare_input(self):
        x = np.arange(24).reshape(2, 3, 4).astype(np.float32)
        return (x,)

    def create_model(self, k, dims):
        import torch

        class aten_rot90(torch.nn.Module):
            def __init__(self, k=1, dims=(0, 1)):
                super(aten_rot90, self).__init__()
                self.k = k
                self.dims = dims

            def forward(self, x):
                return torch.rot90(x, self.k, self.dims)

        ref_net = None
        return aten_rot90(k, dims), ref_net, "aten::rot90"

    @pytest.mark.parametrize("k", [1, 2, 3, 4, 5])
    @pytest.mark.parametrize("dims", [(0, 1), (0, 2), (1, 2)])
    @pytest.mark.nightly
    @pytest.mark.precommit
    @pytest.mark.precommit_torch_export
    def test_rot90(self, k, dims, ie_device, precision, ir_version):
        self._test(*self.create_model(k, dims), ie_device, precision, ir_version,
                   trace_model=True, dynamic_shapes=False)
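For an end-to-end smoke check outside the layer-test harness (assuming an OpenVINO build that already contains this PR), a traced model using torch.rot90 should convert through the standard openvino.convert_model path; the snippet below is a usage sketch, not part of the test suite.

import numpy as np
import torch
import openvino as ov

class Rot90Model(torch.nn.Module):
    def forward(self, x):
        return torch.rot90(x, 1, (1, 2))

example = torch.arange(24, dtype=torch.float32).reshape(2, 3, 4)
ov_model = ov.convert_model(Rot90Model(), example_input=example)  # tracing hits aten::rot90
compiled = ov.compile_model(ov_model, "CPU")
result = compiled(example.numpy())[0]
assert np.array_equal(result, np.rot90(example.numpy(), 1, axes=(1, 2)))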