fix broadcast condition (#90)
As titled.

To test:

`bazel test //...`
zezhang authored Aug 15, 2024
1 parent a145d84 commit f3f29bf
Showing 2 changed files with 28 additions and 2 deletions.
9 changes: 7 additions & 2 deletions lib/Conversion/TorchToTcp/Misc.cpp
@@ -82,6 +82,8 @@ class ConvertAtenBroadcastLikeOps : public OpConversionPattern<AtenOpT> {
 
     SmallVector<int64_t> axes;
     SmallVector<Value> resultShape;
+    ArrayRef<int64_t> newInputShape =
+        input.getType().dyn_cast<RankedTensorType>().getShape();
     for (int64_t i = 0; i < static_cast<int64_t>(newDimSizes.size()); ++i) {
       Value newDimSize = newDimSizes[i];
 
@@ -111,9 +113,12 @@ class ConvertAtenBroadcastLikeOps : public OpConversionPattern<AtenOpT> {
                   ? true
                   : staticDimSize != inputShape[i - newLeadingDims];
 
+      bool isInputDimBroadcastable = newInputShape[i] == 1;
       // Note: The order of checks in this boolean expression matters!
-      if (isNewDim || isDynamicDim ||
-          (!isDimSizePreserved && doesDimSizeChange)) {
+      bool isOutputDimBroadcastable =
+          isNewDim || isDynamicDim ||
+          (!isDimSizePreserved && doesDimSizeChange);
+      if (isInputDimBroadcastable && isOutputDimBroadcastable) {
         axes.push_back(i);
         newDimSize = rewriter.create<torch::TorchConversion::ToI64Op>(
             op->getLoc(), newDimSize);
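In short, the old condition treated any new, dynamic, or size-changing output dimension as a broadcast axis, even when the corresponding input extent was not 1. Below is a minimal standalone sketch of the corrected predicate on plain shape vectors; broadcastAxes and kDynamic are illustrative stand-ins (for the pattern's loop and MLIR's ShapedType::kDynamic), not code from this patch:

#include <cstdint>
#include <iostream>
#include <limits>
#include <vector>

// Stand-in for MLIR's ShapedType::kDynamic (assumption, for illustration).
constexpr int64_t kDynamic = std::numeric_limits<int64_t>::min();

// Hypothetical helper: `input` is assumed to already be rank-expanded to
// the result's rank (leading new dims filled with 1), mirroring what the
// pattern's newInputShape holds. The output-side checks (isNewDim,
// isDynamicDim, size change) are collapsed into one comparison per dim.
std::vector<int64_t> broadcastAxes(const std::vector<int64_t> &input,
                                   const std::vector<int64_t> &result) {
  std::vector<int64_t> axes;
  for (int64_t i = 0; i < static_cast<int64_t>(result.size()); ++i) {
    // The fix: only a size-1 input dimension may be broadcast at all.
    bool isInputDimBroadcastable = input[i] == 1;
    // A dynamic or differing output extent may require a broadcast.
    bool isOutputDimBroadcastable =
        result[i] == kDynamic || result[i] != input[i];
    if (isInputDimBroadcastable && isOutputDimBroadcastable)
      axes.push_back(i);
  }
  return axes;
}

int main() {
  // Mirrors the new lit test: tensor<?x2736x1xf32> -> tensor<?x2736x16xf32>.
  // Axis 0 is dynamic but its input extent is not 1, so it must not be
  // broadcast; only axis 2 (1 -> 16) qualifies. Prints: 2
  for (int64_t axis : broadcastAxes({kDynamic, 2736, 1}, {kDynamic, 2736, 16}))
    std::cout << axis << "\n";
}

Running the sketch prints 2, matching the axes = [2] that the new lit test below expects.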
21 changes: 21 additions & 0 deletions test/Conversion/TorchToTcp/misc.mlir
@@ -397,6 +397,27 @@ func.func @torch.aten.broadcast_to(%arg0: !torch.vtensor<[1,2,1,2],f32>) -> !tor

 // -----
 
+// CHECK-LABEL: @torch.aten.broadcast_to_with_dynamic_dim_input(
+// CHECK-SAME: %[[ARG:.*]]: !torch.vtensor<[?,2736,1],f32>) -> !torch.vtensor<[?,2736,16],f32> {
+// CHECK: %[[TENSOR:.*]] = torch_c.to_builtin_tensor %[[ARG]] : !torch.vtensor<[?,2736,1],f32> -> tensor<?x2736x1xf32>
+// CHECK: %[[CONSTANT:.*]] = torch.constant.int 16
+// CHECK: %[[CAST0:.*]] = torch_c.to_i64 %[[CONSTANT]]
+// CHECK: %[[BROADCAST_DIM:.*]] = arith.index_cast %[[CAST0]] : i64 to index
+// CHECK: %[[AFTER_BROADCAST:.*]] = tcp.broadcast %[[TENSOR]], %[[BROADCAST_DIM]] {axes = [2]} : tensor<?x2736x1xf32>, index -> tensor<?x2736x16xf32>
+// CHECK: %[[OUT:.*]] = torch_c.from_builtin_tensor %[[AFTER_BROADCAST]] : tensor<?x2736x16xf32> -> !torch.vtensor<[?,2736,16],f32>
+// CHECK: return %[[OUT]] : !torch.vtensor<[?,2736,16],f32>
+func.func @torch.aten.broadcast_to_with_dynamic_dim_input(%arg0: !torch.vtensor<[?,2736,1],f32>) -> !torch.vtensor<[?,2736,16],f32> {
+  %int0 = torch.constant.int 0
+  %int2736 = torch.constant.int 2736
+  %int16 = torch.constant.int 16
+  %0 = torch.aten.size.int %arg0, %int0 : !torch.vtensor<[?,2736,1],f32>, !torch.int -> !torch.int
+  %1 = torch.prim.ListConstruct %0, %int2736, %int16 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
+  %2 = torch.aten.broadcast_to %arg0, %1 : !torch.vtensor<[?,2736,1],f32>, !torch.list<int> -> !torch.vtensor<[?,2736,16],f32>
+  return %2 : !torch.vtensor<[?,2736,16],f32>
+}
+
+// -----
+
 // CHECK-LABEL: @torch.aten.broadcast_to_dynamic_dim(
 // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor<[1,2],f32>, %[[ARG1:.*]]: !torch.vtensor<[?],f32>) -> !torch.vtensor<[?,2],f32> {
 // CHECK-DAG: %[[ARG1_T:.*]] = torch_c.to_builtin_tensor %[[ARG1]] : !torch.vtensor<[?],f32> -> tensor<?xf32>
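In this new test, dimension 0 of the input is dynamic rather than 1, so under the corrected condition it is no longer picked up as a broadcast axis, even though the corresponding output dimension (computed via torch.aten.size.int) is dynamic as well. Only axis 2, where the input extent is 1 and the requested output extent is 16, is emitted, which is why the CHECK line expects tcp.broadcast with axes = [2].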
