-From 6a3b9a4936f774957b6a1cedcae40a355fb9670e Mon Sep 17 00:00:00 2001
+From e2a5c68fb109839d7ef07aa9d7c11119556ad0a7 Mon Sep 17 00:00:00 2001
 From: Garra1980 <[email protected]>
-Date: Wed, 20 Aug 2025 01:20:08 +0200
+Date: Sun, 19 Oct 2025 01:45:36 +0200
 Subject: [PATCH] xegpu temporary downstream definition changes and vec

 ---
- mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td      |  5 +++++
+ mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td      |  6 ++++++
  mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp |  7 ++++++-
  mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp              | 10 ++++++----
- 3 files changed, 17 insertions(+), 5 deletions(-)
+ 3 files changed, 18 insertions(+), 5 deletions(-)

 diff --git a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td
-index eb54d6887681..b849c6b97d9d 100644
+index 426377fcf598..f77aaf0bd02e 100644
 --- a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td
 +++ b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td
-@@ -329,6 +329,7 @@ def XeGPU_LoadNdOp : XeGPU_Op<"load_nd", [
+@@ -358,6 +358,7 @@ def XeGPU_LoadNdOp : XeGPU_Op<"load_nd", [
      OptionalAttr<DenseI64ArrayAttr>: $const_offsets,
      OptionalAttr<UnitAttr>: $packed,
      OptionalAttr<DenseI64ArrayAttr>: $transpose,
 +    OptionalAttr<I32Attr>: $transpose_bit_width,
      OptionalAttr<XeGPU_CacheHintAttr>: $l1_hint,
      OptionalAttr<XeGPU_CacheHintAttr>: $l2_hint,
      OptionalAttr<XeGPU_CacheHintAttr>: $l3_hint);
-@@ -1260,5 +1261,9 @@ def XeGPU_MemDescSubviewOp : XeGPU_Op<"mem_desc_subview",
+@@ -1398,4 +1399,9 @@ def XeGPU_StoreMatrixOp : XeGPU_Op<"store_matrix", [MemoryEffects<[MemWrite]>,
    let hasVerifier = 1;
  }

 +def XeGPU_CompileHintOp : XeGPU_Op<"compile_hint", []> {
 +  let summary = "prevents the compiler from scheduling.";
 +  let assemblyFormat = [{ attr-dict }];
 +}
-
++
  #endif // MLIR_DIALECT_XEGPU_IR_XEGPUOPS_TD
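Note on the new op above: XeGPU_CompileHintOp declares no operands, results, or named attributes, and with assemblyFormat [{ attr-dict }] its textual form is simply xegpu.compile_hint. A minimal C++ sketch of emitting it, assuming only the generic OpBuilder::create API and the usual ODS-generated op class (the helper name and insertion point are illustrative, not part of the patch):

// Sketch only: creating the downstream xegpu.compile_hint op from C++.
// Assumes the ODS-generated class xegpu::CompileHintOp produced by the
// .td change above.
#include "mlir/Dialect/XeGPU/IR/XeGPU.h"
#include "mlir/IR/Builders.h"

static void emitCompileHint(mlir::OpBuilder &builder, mlir::Location loc) {
  // No operands, results, or attributes are required, so only a location
  // is needed; the op prints as xegpu.compile_hint.
  builder.create<mlir::xegpu::CompileHintOp>(loc);
}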
 diff --git a/mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp b/mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp
-index 819c2e5973ff..545f1d77156c 100644
+index e2c7d803e5a5..a55a9a233209 100644
 --- a/mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp
 +++ b/mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp
-@@ -485,7 +485,9 @@ struct TransferReadLowering : public OpRewritePattern<vector::TransferReadOp> {
+@@ -535,7 +535,9 @@ struct TransferReadLowering : public OpRewritePattern<vector::TransferReadOp> {
      // By default, no specific caching policy is assigned.
      xegpu::CachePolicyAttr hint = nullptr;
      auto loadOp = xegpu::LoadNdOp::create(rewriter, loc, vecTy, ndDesc,
@@ -45,7 +45,7 @@ index 819c2e5973ff..545f1d77156c 100644
                                            /*l1_hint=*/hint,
                                            /*l2_hint=*/hint, /*l3_hint=*/hint);
      rewriter.replaceOp(readOp, loadOp);
-@@ -569,7 +571,10 @@ struct LoadLowering : public OpRewritePattern<vector::LoadOp> {
+@@ -684,7 +686,10 @@ struct LoadLowering : public OpRewritePattern<vector::LoadOp> {
      // By default, no specific caching policy is assigned.
      xegpu::CachePolicyAttr hint = nullptr;
      auto loadNdOp = xegpu::LoadNdOp::create(
@@ -58,18 +58,18 @@ index 819c2e5973ff..545f1d77156c 100644
                                              /*l2_hint=*/hint, /*l3_hint=*/hint);
      rewriter.replaceOp(loadOp, loadNdOp);
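The extra arguments threaded into the two LoadNdOp::create calls fall between the hunks shown above. As a rough sketch under that caveat, the new optional i32 attribute from the .td change can also be attached after the op is created; the setter name below is an assumption based on MLIR's standard ODS accessor naming for an attribute called $transpose_bit_width and should be checked against the generated XeGPUOps.h.inc:

// Sketch only: tagging an existing xegpu.load_nd with the optional
// transpose_bit_width attribute added by this patch. The accessor name is
// assumed from the ODS snake_case -> CamelCase convention.
#include "mlir/Dialect/XeGPU/IR/XeGPU.h"
#include "mlir/IR/Builders.h"

static void setTransposeBitWidth(mlir::xegpu::LoadNdOp loadNdOp,
                                 mlir::OpBuilder &builder, int32_t bits) {
  loadNdOp.setTransposeBitWidthAttr(builder.getI32IntegerAttr(bits));
}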
 diff --git a/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp b/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp
-index 906c71d8b8da..ecee53c56a54 100644
+index abd12e2e69ac..7d076503c1a9 100644
 --- a/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp
 +++ b/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp
-@@ -78,6 +78,7 @@ static bool isWriteHintOrNone(const CachePolicyAttr &attr) {
+@@ -71,6 +71,7 @@ static bool isWriteHintOrNone(const CachePolicyAttr &attr) {
      return true;
    auto kind = attr.getValue();
    return kind == CachePolicy::CACHED || kind == CachePolicy::UNCACHED ||
 +        kind == CachePolicy::STREAMING ||
          kind == CachePolicy::WRITE_BACK || kind == CachePolicy::WRITE_THROUGH;
  }

-@@ -438,8 +439,8 @@ void LoadNdOp::build(OpBuilder &builder, OperationState &state, Type retType,
+@@ -495,8 +496,8 @@ void LoadNdOp::build(OpBuilder &builder, OperationState &state, Type retType,
                       xegpu::CachePolicyAttr l3_hint) {

    return build(builder, state, retType, tensorDesc, ValueRange(),
@@ -80,7 +80,7 @@ index 906c71d8b8da..ecee53c56a54 100644
  }

  void LoadNdOp::build(OpBuilder &builder, OperationState &state, Type retType,
-@@ -455,7 +456,8 @@ void LoadNdOp::build(OpBuilder &builder, OperationState &state, Type retType,
+@@ -512,7 +513,8 @@ void LoadNdOp::build(OpBuilder &builder, OperationState &state, Type retType,
    auto staticOffsetsAttr = builder.getDenseI64ArrayAttr(staticOffsets);

    build(builder, state, retType, tensorDesc, dynamicOffsets, staticOffsetsAttr,
@@ -90,7 +90,7 @@ index 906c71d8b8da..ecee53c56a54 100644
  }

  LogicalResult LoadNdOp::verify() {
-@@ -517,7 +519,7 @@ LogicalResult LoadNdOp::verify() {
+@@ -574,7 +576,7 @@ LogicalResult LoadNdOp::verify() {
      mlir::emitWarning(getLoc()) << "Invalid transpose attr. It is ignored.";
    }

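The other functional change in XeGPUOps.cpp is that STREAMING now counts as a valid write hint in isWriteHintOrNone. A small sketch of building such a cache-hint attribute, assuming the standard get method generated for the dialect's enum attribute (the helper name is illustrative):

// Sketch only: constructing a STREAMING cache-policy attribute that the
// updated isWriteHintOrNone predicate now accepts as a write hint.
// CachePolicyAttr::get(context, value) is the usual generated EnumAttr API.
#include "mlir/Dialect/XeGPU/IR/XeGPU.h"

static mlir::xegpu::CachePolicyAttr makeStreamingHint(mlir::MLIRContext *ctx) {
  return mlir::xegpu::CachePolicyAttr::get(ctx,
                                           mlir::xegpu::CachePolicy::STREAMING);
}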