[DRAFT][luci/service] Support dynamic shape for reshape #13935

Draft
wants to merge 3 commits into base: master
1 change: 1 addition & 0 deletions compiler/common-artifacts/exclude.lst
@@ -7,6 +7,7 @@
## TensorFlowLiteRecipes
optimize(Add_STR_000) # STRING is not supported
optimize(Add_STR_001) # STRING is not supported
optimize(Net_Gather_SparseToDense_AddV2_000) # Constant folding is not generally supported

## CircleRecipes

@@ -122,7 +122,7 @@ class Algorithm final : public luci::CircleNodeVisitor<loco::TensorShape>
// loco::TensorShape visit(const luci::CircleRelu0To1 *node) final;
// loco::TensorShape visit(const luci::CircleRelu6 *node) final;
// loco::TensorShape visit(const luci::CircleReluN1To1 *node) final;
// loco::TensorShape visit(const luci::CircleReshape *node) final;
loco::TensorShape visit(const luci::CircleReshape *node) final;
// loco::TensorShape visit(const luci::CircleResizeBilinear *node) final;
// loco::TensorShape visit(const luci::CircleResizeNearestNeighbor *node) final;
// loco::TensorShape visit(const luci::CircleReverseSequence *node) final;
87 changes: 0 additions & 87 deletions compiler/luci/service/src/CircleShapeInferenceRule.cpp
@@ -996,84 +996,6 @@ loco::NodeShape infer_range(const luci::CircleRange *node)
return loco::NodeShape{output_shape};
}

loco::NodeShape infer_reshape(const luci::CircleReshape *node)
{
  LOGGER(l);

  const loco::DataType S32 = loco::DataType::S32;

  loco::TensorShape shape_by_input;
  {
    LUCI_ASSERT(node->shape(), "2nd input shape() should not be nullptr");

    // Only support node's shape() is CircleConst with S32
    // TODO support other node with other types
    auto const_shape_node = dynamic_cast<luci::CircleConst *>(node->shape());
    if (const_shape_node != nullptr)
    {
      LUCI_ASSERT(const_shape_node->dtype() == S32, "Only support int32 CircleConst");

      shape_by_input.rank(const_shape_node->size<S32>());

      for (uint32_t axis = 0; axis < shape_by_input.rank(); ++axis)
      {
        shape_by_input.dim(axis) = const_shape_node->at<S32>(axis);
      }
    }
    else
    {
      // We use shape from the node itself
      shape_by_input = own_shape(node);
    }
  }

  loco::TensorShape shape_by_attr;
  {
    shape_by_attr.rank(node->newShape()->rank());

    for (uint32_t axis = 0; axis < shape_by_attr.rank(); ++axis)
    {
      shape_by_attr.dim(axis) = node->newShape()->dim(axis);
    }
  }

  if (!(shape_by_input == shape_by_attr))
  {
    INFO(l) << "CircleReshape: Two new shape information mismatched : " << std::endl;
    INFO(l) << " shape_by_input : " << shape_by_input << std::endl;
    INFO(l) << " shape_by_attr : " << shape_by_attr << std::endl;
  }

  loco::TensorShape output_shape = shape_by_input;

  // One of the dimensions can have special value -1, meaning its actual value should be inferred.
  const auto input_shape = luci::shape_get(node->tensor()).as<loco::TensorShape>();
  uint32_t input_element_count = 1;
  uint32_t output_element_count = 1;
  uint32_t unknown_dim_index = UINT32_MAX;
  for (uint32_t i = 0; i < input_shape.rank(); ++i)
    input_element_count *= (input_shape.dim(i).known() ? input_shape.dim(i).value() : 1);
  for (uint32_t dim_index = 0; dim_index < output_shape.rank(); ++dim_index)
  {
    const uint32_t dim_value = output_shape.dim(dim_index).value();
    if (static_cast<int>(dim_value) == -1)
    {
      LUCI_ASSERT(unknown_dim_index == UINT32_MAX, "More than one unknown dimension");
      unknown_dim_index = dim_index;
    }
    else
    {
      output_element_count *= dim_value;
    }
  }
  if (unknown_dim_index != UINT32_MAX)
  {
    output_shape.dim(unknown_dim_index) = input_element_count / output_element_count;
  }

  return loco::NodeShape{output_shape};
}

template <class CIRCLENODE> loco::NodeShape infer_resize_type(const CIRCLENODE *node)
{
auto input_shape = luci::shape_get(node->input()).template as<loco::TensorShape>();
@@ -2228,15 +2150,6 @@ class ShapeInferenceAlgorithm final : public luci::CircleNodeVisitor<loco::NodeShape>
return loco::NodeShape{input_shape};
}

/**
* @note CircleReshape has new shape info in two places: 2nd input and attribute.
* This shape inference uses shape from input 'shape' node when it's constant.
* If not, shape will be from node itself. shape from attribute is not used.
*
* TODO Change this policy when not appropriate
*/
loco::NodeShape visit(const luci::CircleReshape *node) final { return infer_reshape(node); }

loco::NodeShape visit(const luci::CircleResizeBilinear *node) final
{
return infer_resize_type(node);
136 changes: 136 additions & 0 deletions compiler/luci/service/src/Nodes/CircleReshape.cpp
@@ -14,8 +14,14 @@
 * limitations under the License.
 */

#include "luci/Service/CircleShapeInference.h"
#include "Check.h"

#include "CircleShapeInferenceHelper.h"
#include "CircleCloneNode.h"

#include <oops/InternalExn.h>

namespace luci
{

@@ -34,4 +40,134 @@ luci::CircleNode *CloneNodeLet<CN::OPQR>::visit(const luci::CircleReshape *node)
return cloned;
}

namespace sinf
{

/**
 * @note CircleReshape always has two inputs: tensor and shape.
 *       The shape input can be CircleConst, CircleOutputDummy, or CircleNode.
 *       - If the shape input is CircleConst, the shape is inferred from the constant.
 *       - If the shape input is CircleOutputDummy, the shape is inferred from
 *         the attribute if it exists. If the attribute does not exist,
 *         the shape is inferred from the node itself.
 *       - If the shape input is CircleNode, the shape is not inferred.
 */

Contributor:
Question: What does CircleOutputDummy mean?

jongwonyang (Member, Author), Sep 11, 2024:
While importing the circle file, CircleOutputDummy is added as the node's shape input when there is no shape_by_input and no shape_by_attr:

auto *shape_node = (inputs.size() == 2) ? inputs.at(1) : nullptr;
if (shape_node == nullptr)
{
  const auto *options = op.builtin_options.AsReshapeOptions();
  if (options != nullptr)
    shape_node = create_shape_node(options->new_shape, graph);
  else
  {
    shape_node = graph->nodes()->create<CircleOutputDummy>();
    shape_node->dtype(loco::DataType::S32);
    shape_node->rank(0);
    shape_node->name("Reshape/dummy");
  }
}
loco::TensorShape Algorithm::visit(const luci::CircleReshape *node)
{
  const loco::DataType S32 = loco::DataType::S32;

  // CircleReshape node must have reshape/shape
  if (node->shape() == nullptr)
  {
    INTERNAL_EXN("2nd input shape() should not be nullptr");
  }

  bool should_infer = true;
  loco::TensorShape output_shape;
  {
    // Check if reshape/shape is CircleConst
    auto const_input = dynamic_cast<luci::CircleConst *>(node->shape());
    if (const_input != nullptr)
    {
      output_shape.rank(const_input->size<S32>());

      for (uint32_t axis = 0; axis < output_shape.rank(); ++axis)
      {
        output_shape.dim(axis) = const_input->at<S32>(axis);
Contributor:
If const_input has a -1 value, the dimension value becomes 0xFFFFFFFF and known() == true.
When this happens, output_shape.dim(axis).unset() would be correct.

jongwonyang (Member, Author):
I missed that...!
Thanks for pointing out something really important :)
I'll make the change.
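
A minimal sketch of the behavior discussed above, assuming loco::Dimension's interface as used in this diff (uint32_t storage with known()/unset()); illustrative only, not part of the change:

#include <loco/IR/Dimension.h>

#include <cassert>
#include <cstdint>

int main()
{
  // A negative int32 wildcard converts to uint32_t, so the dimension stores
  // 0xFFFFFFFF and still reports known() == true.
  int32_t wildcard = -1;
  loco::Dimension dim{static_cast<uint32_t>(wildcard)};
  assert(dim.known() && dim.value() == 0xFFFFFFFFu);

  // unset() marks the dimension as unknown (dynamic), which is what the
  // inference code does for negative values in the shape input.
  dim.unset();
  assert(!dim.known());
  return 0;
}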

        if (const_input->at<S32>(axis) < 0)
        {
          output_shape.dim(axis).unset();
        }
      }
    }
    else
    {
      // Check if reshape/shape is CircleOutputDummy
      auto dummy_input = dynamic_cast<luci::CircleOutputDummy *>(node->shape());
      if (dummy_input != nullptr)
      {
        if (node->newShape()->rank() > 0)
        {
          output_shape.rank(node->newShape()->rank());

          for (uint32_t axis = 0; axis < output_shape.rank(); ++axis)
          {
            output_shape.dim(axis) = node->newShape()->dim(axis);
            if (node->newShape()->dim(axis) < 0)
            {
              output_shape.dim(axis).unset();
            }
          }
        }
        else
        {
          output_shape = circle_shape(node);
Contributor:
Also, to check my understanding: this part corresponds to "get shape from own shape".

jongwonyang (Member, Author):
Yes, you're right :)

llFreetimell (Contributor), Sep 11, 2024:
Why is own_shape kept? I thought it would be removed because we implemented a more complete inference logic..! (ref: #13912 (comment))

jongwonyang (Member, Author), Sep 11, 2024:
own_shape is kept to handle this recipe (Reshape_003), which has no attribute and no shape input.

[attached image: Reshape_003 recipe]

I've read the related PRs (#1554, #1519) but I'm not sure how to handle this recipe.
Do you think we need to discuss this recipe on #13927?

Contributor:
According to #13927 (comment), we may be able to revise the recipe first...!
@zetwhite What do you think? :)

Contributor:
@llFreetimell Thank you a lot for helping us 😄

I think @jongwonyang followed what I suggested in #13927 (comment):

  • Regard "no Reshape/shape input, no attribute" as a valid graph.
    • While importing, set Reshape/shape = CircleOutputDummy.
      • Since the Reshape IR allows only 2 inputs, there is no way to avoid creating the dummy node.
    • While shape inferencing, if Reshape/shape is CircleOutputDummy:
      • First, try shape inference using the attribute.
      • Second, try shape inference using the node's own output_shape.

It was hard to make a policy for the "no attribute, no shape input" case, so we tried not to touch other code (importer, recipes) and to do our best in the shape inference logic.

Contributor:
I don't mean to say it should be done this way; for now, I chose an easy way.
If you have any other thoughts, please feel free to share them.

Contributor:
I understood :D
Then, I agree with your direction, let's go with it!
        }
      }
      else
      {
        // Check if reshape/shape is CircleNode
        auto node_input = dynamic_cast<luci::CircleNode *>(node->shape());
        if (node_input != nullptr)
        {
          output_shape.rank(node_input->dim(0).value());

          for (uint32_t axis = 0; axis < output_shape.rank(); ++axis)
          {
            output_shape.dim(axis).unset();
          }

          should_infer = false;
        }
      }
    }
  }

  const auto input = loco::must_cast<luci::CircleNode *>(node->tensor());
  const auto input_shape = circle_shape(input);
  uint32_t input_element_count = 1;
  for (uint32_t axis = 0; axis < input_shape.rank(); ++axis)
  {
    if (input_shape.dim(axis).known())
    {
      input_element_count *= input_shape.dim(axis).value();
    }
    else
    {
      should_infer = false;
      break;
    }
  }

  if (should_infer)
  {
    uint32_t output_element_count = 1;
    uint32_t unknown_dim_index = UINT32_MAX;
    for (uint32_t dim_index = 0; dim_index < output_shape.rank(); ++dim_index)
    {
      if (output_shape.dim(dim_index).known() == false)
      {
        if (unknown_dim_index != UINT32_MAX)
        {
          INTERNAL_EXN("More than one unknown dimension");
        }
        unknown_dim_index = dim_index;
      }
      else
      {
        const uint32_t dim_value = output_shape.dim(dim_index).value();
        output_element_count *= dim_value;
      }
    }
    if (unknown_dim_index != UINT32_MAX)
    {
      output_shape.dim(unknown_dim_index) = input_element_count / output_element_count;
    }
  }

  return output_shape;
}

} // namespace sinf

} // namespace luci
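
As a recap of the rule Algorithm::visit implements, a small self-contained sketch (plain C++, independent of luci/loco, illustrative only) of how a single -1 entry in the requested shape is resolved from the input element count:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Resolve a single wildcard (-1) entry in a requested reshape target,
// mirroring the element-count arithmetic of the shape inference above.
std::vector<int64_t> resolve_reshape(const std::vector<int64_t> &input_shape,
                                     std::vector<int64_t> target)
{
  int64_t input_elements = 1;
  for (auto d : input_shape)
    input_elements *= d;

  int64_t known_elements = 1;
  int wildcard_index = -1;
  for (std::size_t i = 0; i < target.size(); ++i)
  {
    if (target[i] == -1)
    {
      assert(wildcard_index == -1 && "More than one unknown dimension");
      wildcard_index = static_cast<int>(i);
    }
    else
    {
      known_elements *= target[i];
    }
  }

  if (wildcard_index != -1)
    target[wildcard_index] = input_elements / known_elements;
  return target;
}

int main()
{
  // Reshaping a [2, 3, 4] tensor to [4, -1] yields [4, 6].
  auto out = resolve_reshape({2, 3, 4}, {4, -1});
  assert(out[0] == 4 && out[1] == 6);
  return 0;
}

When any input dimension is itself unknown, the implementation above skips this resolution (the should_infer flag) and leaves the wildcard dimension dynamic instead.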