[mlir][AMDGPU] Add better load/store lowering for full select mask #146748

Status: Open — wants to merge 2 commits into main
109 changes: 103 additions & 6 deletions mlir/lib/Dialect/AMDGPU/Transforms/MaskedloadToLoad.cpp
@@ -52,13 +52,45 @@ static LogicalResult baseInBufferAddrSpace(PatternRewriter &rewriter,
}

static Value createVectorLoadForMaskedLoad(OpBuilder &builder, Location loc,
-                                           vector::MaskedLoadOp maskedOp) {
+                                           vector::MaskedLoadOp maskedOp,
+                                           bool passthru) {
  VectorType vectorType = maskedOp.getVectorType();
  Value load = builder.create<vector::LoadOp>(
      loc, vectorType, maskedOp.getBase(), maskedOp.getIndices());
-  Value res = builder.create<arith::SelectOp>(
-      loc, vectorType, maskedOp.getMask(), load, maskedOp.getPassThru());
-  return res;
+  if (passthru)
+    load = builder.create<arith::SelectOp>(loc, vectorType, maskedOp.getMask(),
+                                           load, maskedOp.getPassThru());
+  return load;
}

+/// Check if the given value comes from a:
+///
+/// arith.select %cond, TRUE/FALSE, TRUE/FALSE
+///
+/// i.e the condition is either always true or it's always false.
+///
+/// Returns the condition to use for scf.if (condition) { true } else { false }.
+static FailureOr<Value> matchFullSelect(OpBuilder &b, Value val) {
+  auto selectOp = val.getDefiningOp<arith::SelectOp>();
+  if (!selectOp)
+    return failure();
+  std::optional<int64_t> trueInt = getConstantIntValue(selectOp.getTrueValue());
+  std::optional<int64_t> falseInt =
+      getConstantIntValue(selectOp.getFalseValue());
+  if (!trueInt || !falseInt)
+    return failure();
+  // getConstantIntValue returns -1 for "true" for bools.
+  if (trueInt.value() == -1 && falseInt.value() == 0)
+    return selectOp.getCondition();
+
+  if (trueInt.value() == 0 && falseInt.value() == -1) {
+    Value cond = selectOp.getCondition();
+    Value one = b.create<arith::ConstantIntOp>(cond.getLoc(), /*value=*/true,
+                                               /*width=*/1);
+    Value inverse = b.create<arith::XOrIOp>(cond.getLoc(), cond, one);
Comment on lines +88 to +90 (Member):
I think the canonical way is to cmpi with 0
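
A minimal sketch of that suggestion (the reviewer's idea, not code from this PR), reusing the `b` and `cond` values from matchFullSelect:

  // Invert the i1 by comparing it for equality against zero instead of
  // XOR-ing it with a constant true.
  Value zero = b.create<arith::ConstantIntOp>(cond.getLoc(), /*value=*/0,
                                              /*width=*/1);
  Value inverse = b.create<arith::CmpIOp>(
      cond.getLoc(), arith::CmpIPredicate::eq, cond, zero);

Comparing an i1 with 0 under `eq` computes its logical negation, which is the form the reviewer describes as canonical.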

+    return inverse;
+  }
+  return failure();
Comment (Member):
Shouldn't this also check that the condition is a scalar?
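
One way to address that (a hypothetical guard, not part of this PR) would be to bail out unless the select condition is a scalar i1, since the patterns below branch on it directly with scf.if:

  // Reject vector conditions: arith.select with a vector condition picks
  // elements individually, so the mask is not uniformly all-true/all-false.
  Value cond = selectOp.getCondition();
  if (!cond.getType().isInteger(1))
    return failure();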

+}

static constexpr char kMaskedloadNeedsMask[] =
@@ -78,6 +110,16 @@ struct MaskedLoadLowering final : OpRewritePattern<vector::MaskedLoadOp> {
      return failure();
    }

+    // Check if this is either a full inbounds load or an empty, oob load. If
+    // so, take the fast path and don't generate a if condition, because we know
Comment (Member):

Suggested change:
-    // so, take the fast path and don't generate a if condition, because we know
+    // so, take the fast path and don't generate an if condition, because we know
+    // doing the oob load is always safe.
+    if (succeeded(matchFullSelect(rewriter, maskedOp.getMask()))) {
+      Value load = createVectorLoadForMaskedLoad(rewriter, maskedOp.getLoc(),
+                                                 maskedOp, /*passthru=*/true);
+      rewriter.replaceOp(maskedOp, load);
+      return success();
+    }
+
    Location loc = maskedOp.getLoc();
    Value src = maskedOp.getBase();

@@ -135,7 +177,8 @@ struct MaskedLoadLowering final : OpRewritePattern<vector::MaskedLoadOp> {
    };

    auto elseBuilder = [&](OpBuilder &builder, Location loc) {
-      Value res = createVectorLoadForMaskedLoad(builder, loc, maskedOp);
+      Value res = createVectorLoadForMaskedLoad(builder, loc, maskedOp,
+                                                /*passthru=*/true);
      rewriter.create<scf::YieldOp>(loc, res);
    };

@@ -148,11 +191,65 @@ struct MaskedLoadLowering final : OpRewritePattern<vector::MaskedLoadOp> {
  }
};

+struct FullMaskedLoadToConditionalLoad
+    : OpRewritePattern<vector::MaskedLoadOp> {
+  using OpRewritePattern::OpRewritePattern;
+
+public:
Comment on lines +197 to +198 (Member):

Suggested change (remove this line):
-public:
+  LogicalResult matchAndRewrite(vector::MaskedLoadOp loadOp,
+                                PatternRewriter &rewriter) const override {
+    FailureOr<Value> maybeCond = matchFullSelect(rewriter, loadOp.getMask());
+    if (failed(maybeCond)) {
+      return failure();
+    }
+
+    Value cond = maybeCond.value();
+    auto trueBuilder = [&](OpBuilder &builder, Location loc) {
+      Value res = createVectorLoadForMaskedLoad(builder, loc, loadOp,
+                                                /*passthru=*/false);
+      rewriter.create<scf::YieldOp>(loc, res);
+    };
+    auto falseBuilder = [&](OpBuilder &builder, Location loc) {
+      rewriter.create<scf::YieldOp>(loc, loadOp.getPassThru());
+    };
+    auto ifOp = rewriter.create<scf::IfOp>(loadOp.getLoc(), cond, trueBuilder,
+                                           falseBuilder);
+    rewriter.replaceOp(loadOp, ifOp);
+    return success();
+  }
+};
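
For illustration (a sketch with assumed value names, not output captured from this PR), this pattern turns:

  %mask = arith.select %cond, %all_true, %all_false : vector<4xi1>
  %res = vector.maskedload %mem[%idx], %mask, %passthru : memref<8xf16>, vector<4xi1>, vector<4xf16> into vector<4xf16>

into a branch that either issues a plain vector.load or yields the passthru value:

  %res = scf.if %cond -> (vector<4xf16>) {
    %load = vector.load %mem[%idx] : memref<8xf16>, vector<4xf16>
    scf.yield %load : vector<4xf16>
  } else {
    scf.yield %passthru : vector<4xf16>
  }

Note that the true branch needs no arith.select on the loaded value, which is why the pattern calls createVectorLoadForMaskedLoad with /*passthru=*/false.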

+struct FullMaskedStoreToConditionalStore
+    : OpRewritePattern<vector::MaskedStoreOp> {
+  using OpRewritePattern::OpRewritePattern;
+
+public:
Comment on lines +225 to +226 (Member):

Suggested change (remove this line):
-public:
+  LogicalResult matchAndRewrite(vector::MaskedStoreOp storeOp,
+                                PatternRewriter &rewriter) const override {
+    FailureOr<Value> maybeCond = matchFullSelect(rewriter, storeOp.getMask());
+    if (failed(maybeCond)) {
+      return failure();
+    }
+    Value cond = maybeCond.value();
+
+    auto trueBuilder = [&](OpBuilder &builder, Location loc) {
+      rewriter.create<vector::StoreOp>(loc, storeOp.getValueToStore(),
+                                       storeOp.getBase(), storeOp.getIndices());
+      rewriter.create<scf::YieldOp>(loc);
+    };
+    auto ifOp = rewriter.create<scf::IfOp>(storeOp.getLoc(), cond, trueBuilder);
+    rewriter.replaceOp(storeOp, ifOp);
+    return success();
+  }
+};
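
The store side is the analogous rewrite (again a sketch with assumed names): a vector.maskedstore whose mask is a full select collapses to a plain store guarded by the scalar condition, with no else region needed:

  scf.if %cond {
    vector.store %val, %mem[%idx] : memref<8xf16>, vector<4xf16>
  }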

} // namespace

void mlir::amdgpu::populateAmdgpuMaskedloadToLoadPatterns(
    RewritePatternSet &patterns, PatternBenefit benefit) {
-  patterns.add<MaskedLoadLowering>(patterns.getContext(), benefit);
+  patterns.add<MaskedLoadLowering, FullMaskedLoadToConditionalLoad,
+               FullMaskedStoreToConditionalStore>(patterns.getContext(),
+                                                  benefit);
}

struct AmdgpuMaskedloadToLoadPass final
25 changes: 25 additions & 0 deletions mlir/test/Dialect/AMDGPU/maskedload-to-load.mlir
@@ -114,3 +114,28 @@ func.func @transfer_scalar(%mem : memref<8x8xf32, #amdgpu.address_space<fat_raw_
// CHECK: %[[IF:.*]] = scf.if
// CHECK: %[[LOAD:.*]] = vector.load %[[ARG0]][%[[ARG1]], %[[ARG1]]]
// CHECK: %[[SELECT:.*]] = arith.select %arg2, %[[LOAD]], %[[ARG3]]

+// -----
+
+func.func @transfer_to_maskedload_fatrawbuffer(%mem : memref<8x8xf32, #amdgpu.address_space<fat_raw_buffer>>, %idx : index, %mask : vector<4xi1>, %passthru : vector<4xf32>) -> vector<4xf32> {
+  %res = vector.maskedload %mem[%idx, %idx], %mask, %passthru : memref<8x8xf32, #amdgpu.address_space<fat_raw_buffer>>, vector<4xi1>, vector<4xf32> into vector<4xf32>
+  return %res : vector<4xf32>
+}

+// -----

+func.func @full_select_maskedload_fatrawbuffer_to_load(%mem : memref<8x8xf16, #amdgpu.address_space<fat_raw_buffer>>, %idx : index, %cond : i1, %passthru : vector<4xf16>) -> vector<4xf16> {
+  %true = arith.constant dense<true> : vector<4xi1>
+  %false = arith.constant dense<false> : vector<4xi1>
+  %mask = arith.select %cond, %true, %false : vector<4xi1>
+  %res = vector.maskedload %mem[%idx, %idx], %mask, %passthru : memref<8x8xf16, #amdgpu.address_space<fat_raw_buffer>>, vector<4xi1>, vector<4xf16> into vector<4xf16>
+  return %res : vector<4xf16>
+}

+func.func @full_select_maskedload_to_load(%mem : memref<8x8xf16>, %idx : index, %cond : i1, %passthru : vector<4xf16>) -> vector<4xf16> {
+  %true = arith.constant dense<true> : vector<4xi1>
+  %false = arith.constant dense<false> : vector<4xi1>
+  %mask = arith.select %cond, %true, %false : vector<4xi1>
Comment (Member):

We should test for a vector condition as well (see the sketch after this function).

+  %res = vector.maskedload %mem[%idx, %idx], %mask, %passthru : memref<8x8xf16>, vector<4xi1>, vector<4xf16> into vector<4xf16>
+  return %res : vector<4xf16>
+}
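
A sketch of what such a test might look like (hypothetical function name and shapes; with a vector condition arith.select is element-wise, so the mask is not uniform and the patterns should leave the maskedload untouched):

func.func @no_fold_vector_cond_maskedload(%mem : memref<8x8xf16>, %idx : index, %cond : vector<4xi1>, %passthru : vector<4xf16>) -> vector<4xf16> {
  %true = arith.constant dense<true> : vector<4xi1>
  %false = arith.constant dense<false> : vector<4xi1>
  %mask = arith.select %cond, %true, %false : vector<4xi1>, vector<4xi1>
  %res = vector.maskedload %mem[%idx, %idx], %mask, %passthru : memref<8x8xf16>, vector<4xi1>, vector<4xf16> into vector<4xf16>
  return %res : vector<4xf16>
}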