
[mlir][xegpu] Tensor descriptor type verifier #124548


Merged · 8 commits · Feb 7, 2025
2 changes: 1 addition & 1 deletion mlir/include/mlir/Dialect/XeGPU/IR/XeGPUTypes.td
@@ -179,7 +179,7 @@ def XeGPU_TensorDesc: XeGPUTypeDef<"TensorDesc", "tensor_desc",
}];

let hasCustomAssemblyFormat = true;

let genVerifyDecl = 1;
}


82 changes: 79 additions & 3 deletions mlir/lib/Dialect/XeGPU/IR/XeGPUDialect.cpp
@@ -175,9 +175,10 @@ mlir::Type TensorDescType::parse(::mlir::AsmParser &parser) {
if (parser.parseGreater())
return {};

return TensorDescType::get(parser.getContext(), shape, elementType,
encoding.value_or(mlir::Attribute()),
sg_map.value_or(mlir::Attribute()));
return TensorDescType::getChecked(
[&]() { return parser.emitError(parser.getNameLoc()); },
parser.getContext(), shape, elementType,
encoding.value_or(mlir::Attribute()), sg_map.value_or(mlir::Attribute()));
}

void TensorDescType::print(::mlir::AsmPrinter &printer) const {
@@ -223,6 +224,81 @@ TensorDescType TensorDescType::get(llvm::ArrayRef<int64_t> shape,
return Base::get(context, shape, elementType, attr, sg_map);
}

LogicalResult TensorDescType::verify(
llvm::function_ref<::mlir::InFlightDiagnostic()> emitError,
llvm::ArrayRef<int64_t> shape, mlir::Type elementType,
mlir::Attribute encoding, mlir::Attribute sg_map) {
size_t rank = shape.size();
if (rank != 1 && rank != 2)
return emitError() << "expected 1D or 2D tensor";

auto scatterAttr = mlir::dyn_cast_if_present<ScatterTensorDescAttr>(encoding);
if (scatterAttr) {
// Expected tensor ranks for scattered data:
// - 1D tensor for fully non-contiguous elements (chunk size == 1)
// - 2D tensor for scattered blocks (chunk size > 1)
IntegerAttr chunkAttr = scatterAttr.getChunkSize();
unsigned chunkSize = chunkAttr ? chunkAttr.getInt() : 1;
if (rank == 1 && chunkSize != 1)
return emitError() << "expected non-contiguous elements for 1D tensor";
if (rank == 2 && chunkSize < 2)
return emitError() << "expected chunk blocks for 2D tensor";
}

if (auto blockAttr =
mlir::dyn_cast_if_present<BlockTensorDescAttr>(encoding)) {
MemorySpaceAttr memorySpaceAttr = blockAttr.getMemorySpace();
if (rank == 2 && memorySpaceAttr &&
memorySpaceAttr.getValue() == MemorySpace::SLM)
return emitError() << "SLM is not supported for 2D block tensor";
}

if (auto sgMapAttr = llvm::dyn_cast_if_present<SGMapAttr>(sg_map)) {
ArrayRef<uint32_t> wiLayout = sgMapAttr.getWiLayout();
ArrayRef<uint32_t> wiData = sgMapAttr.getWiData();

if (rank == 1) {
if (wiLayout[0] != 1 || wiData[0] != 1)
return emitError()
<< "outer layout distribution and data mapping must be 1 "
"for 1D tensor";
}

if (scatterAttr) {
// Validate subgroup mapping rules for scattered tensors.
// A work-item's slice of the tensor with shape [sg_size] or
// [sg_size, chunk_size] will be [1] or [1, chunk_size] respectively;
// the mapping should reflect that.
if (wiData[0] != 1)
return emitError()
<< "cannot map over non-contiguous scattered row elements";

IntegerAttr chunkAttr = scatterAttr.getChunkSize();
unsigned chunkSize = chunkAttr ? chunkAttr.getInt() : 1;
if (wiData[1] != chunkSize)
return emitError() << "work item data mapping must match the number of "
"contiguous elements";
}

// For 1D tensor, pad the shape with an outer unit dimension to allow common
// validation logic.
SmallVector<int64_t> tensorShape(shape.begin(), shape.end());
if (rank == 1)
tensorShape = {1, tensorShape.back()};

size_t dims = tensorShape.size();
for (size_t i = 0; i < dims; ++i) {
uint32_t numElemPerWi = wiLayout[i] * wiData[i];
if (tensorShape[i] < numElemPerWi || tensorShape[i] % numElemPerWi != 0)
return emitError() << "cannot distribute " << tensorShape[i] << " over "
<< wiLayout[i] << " work items with " << wiData[i]
<< " elements each";
}
}

return success();
}

} // namespace xegpu
} // namespace mlir
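
To illustrate the distribution rule added above, the following types are hypothetical examples (not taken from this diff); the sg_map syntax mirrors the tests later in the patch:

// Verifies: each dimension is divisible by wi_layout[i] * wi_data[i]
// (8 % (1 * 1) == 0 and 16 % (16 * 1) == 0).
!xegpu.tensor_desc<8x16xf32, #xegpu.sg_map<wi_layout = [1, 16], wi_data = [1, 1]>>

// Rejected: "cannot distribute 24 over 16 work items with 1 elements each".
!xegpu.tensor_desc<8x24xf32, #xegpu.sg_map<wi_layout = [1, 16], wi_data = [1, 1]>>

// Rejected: for a 1D tensor the outer wi_layout and wi_data entries must be 1.
!xegpu.tensor_desc<16xf32, #xegpu.sg_map<wi_layout = [2, 8], wi_data = [1, 1]>>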

47 changes: 16 additions & 31 deletions mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp
@@ -81,24 +81,28 @@ static bool isWriteHintOrNone(const CachePolicyAttr &attr) {
// each dimension.
static bool isArgShapesValid(ArrayRef<int64_t> descShape,
ArrayRef<int64_t> valShape, SGMapAttr sgMap) {
if (descShape == valShape) {
if (!sgMap)
return true;

// this can be relaxed if necessary by supporting non-2d shapes distribution
// until the constraints are defined this lives here instead of the tensor
// descriptor type.
return valShape.size() == sgMap.getWiLayout().size();
}
// Equal shapes with no distribution - no further verification needed.
if (descShape == valShape && !sgMap)
return true;

// Unknown distribution - cannot perform operation on partial shape.
if (!sgMap)
return false;

if (valShape.size() != descShape.size())
// Invalid rank or mixed rank usage.
size_t descRank = descShape.size();
if (descRank > 2 || valShape.size() != descRank)
return false;

// For 1D, SG map is guaranteed to be unit size in the outer dimension.
// Only take the distribution over the innermost dimension for validation.
ArrayRef<uint32_t> wiLayout = sgMap.getWiLayout();
SmallVector<uint32_t> mapLayout(wiLayout.begin(), wiLayout.end());
if (descRank == 1)
mapLayout = {wiLayout.back()};

for (const auto &[factor, dim, expected] :
llvm::zip_equal(sgMap.getWiLayout(), valShape, descShape)) {
llvm::zip_equal(mapLayout, valShape, descShape)) {
if (factor * dim != expected)
return false;
}
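
As a sketch of the 1D path (an illustration, not from the patch): for !xegpu.tensor_desc<32xf32, #xegpu.sg_map<wi_layout = [1, 16], wi_data = [1, 1]>>, mapLayout collapses to [16], so a per-work-item value of vector<2xf32> passes the check (16 * 2 == 32), while vector<32xf32> does not.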
@@ -227,10 +231,6 @@ LogicalResult CreateNdDescOp::verify() {
if (getType().isScattered())
return emitOpError("Expects a non-scattered TensorDesc.\n");

if (getType().getRank() == 2 &&
tdescMemorySpace == static_cast<unsigned>(MemorySpace::SLM))
return emitOpError("SLM is not supported for 2D Block TensorDesc.\n");

return success();
}
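
The SLM restriction removed from this op verifier is now enforced by the type verifier above, so it applies wherever the type appears. A hypothetical example (assuming the block descriptor attribute prints as #xegpu.block_tdesc_attr<memory_space = slm>):

// Rejected by the type verifier: "SLM is not supported for 2D block tensor".
!xegpu.tensor_desc<8x16xf32, #xegpu.block_tdesc_attr<memory_space = slm>>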

@@ -454,22 +454,7 @@ LogicalResult CreateDescOp::verify() {
if (shape != tdescShape)
return emitOpError("Incorrect TensorDesc shape. ")
<< "Expected is " << makeString(shape) << "\n";
if (auto sgMap = tdescTy.getSGMapAttr()) {
// A work-item's slice of the TensorDesc with shape [sg_size] or
// [sg_size, chunk_size] will be [1] or [1, chunks_size] respectively,
// the mapping should reflect that.
if (sgMap.getWiData()[0] > 1)
return emitOpError("TensorDesc's SG map only supports multiple elements "
"contiguous along rows.");
if (chunkSize != static_cast<int>(sgMap.getWiData()[1]))
return emitOpError(
"TensorDesc's chunkSize must match WI's data mapping.");
if (int rank = tdescTy.getRank();
(sgMap.getWiLayout()[2 - rank] != tdescShape[0]))
return emitOpError("Detected a conflict between SG map's work-item "
"layout and TensorDesc shape. Check the index of "
"`subgroup_size` in WI layout map.");
}

return success();
}
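
The SG-map checks dropped here are likewise covered by the type verifier. A sketch of a scattered descriptor that satisfies the moved rules (hypothetical; the scatter attribute syntax is assumed and not shown in this diff):

// chunk_size == 2 matches wi_data[1], wi_data[0] == 1, and the 16x2 shape
// distributes over wi_layout [16, 1] with wi_data [1, 2].
!xegpu.tensor_desc<16x2xf32, #xegpu.scatter_tdesc_attr<chunk_size = 2>,
  #xegpu.sg_map<wi_layout = [16, 1], wi_data = [1, 2]>>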

22 changes: 22 additions & 0 deletions mlir/test/Dialect/XeGPU/XeGPUOps.mlir
@@ -97,6 +97,16 @@ gpu.func @test_load_nd_vc_3(%src: memref<24x32xf32>) {
gpu.return
}

// CHECK: func @test_load_nd_vc_4(%[[arg0:.*]]: memref<24x32xf32>) {
gpu.func @test_load_nd_vc_4(%src: memref<24x32xf32>) {
// CHECK: %[[R0:.*]] = xegpu.create_nd_tdesc %arg0[0, 0] : memref<24x32xf32> -> !xegpu.tensor_desc<32xf32, #xegpu.sg_map<wi_layout = [1, 16], wi_data = [1, 1]>>
%1 = xegpu.create_nd_tdesc %src[0, 0] : memref<24x32xf32> ->
!xegpu.tensor_desc<32xf32, #xegpu.sg_map<wi_layout = [1, 16], wi_data = [1, 1]>>
// CHECK: %[[R1:.*]] = xegpu.load_nd %[[R0]] <{l1_hint = #xegpu.cache_hint<cached>, l2_hint = #xegpu.cache_hint<uncached>}> : !xegpu.tensor_desc<32xf32, #xegpu.sg_map<wi_layout = [1, 16], wi_data = [1, 1]>> -> vector<2xf32>
%2 = xegpu.load_nd %1 <{l1_hint = #xegpu.cache_hint<cached>, l2_hint = #xegpu.cache_hint<uncached>}> : !xegpu.tensor_desc<32xf32, #xegpu.sg_map<wi_layout = [1, 16], wi_data = [1, 1]>> -> vector<2xf32>
gpu.return
}
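
The vector<2xf32> result follows from the mapping: the 32 elements of the 1D descriptor are distributed across wi_layout = [1, 16], i.e. 16 work items, so each work item loads 32 / 16 = 2 contiguous f32 values.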

// CHECK: func @test_store_nd_vc(%[[arg0:.*]]: memref<24x32xf16>) {
gpu.func @test_store_nd_vc(%dst: memref<24x32xf16>) {
// CHECK: %[[C:.*]] = arith.constant dense<1.000000e+00> : vector<24x32xf16>
@@ -132,6 +142,18 @@ gpu.func @test_store_nd_vc_3(%src: memref<24x32xf16>) {
gpu.return
}

// CHECK: func @test_store_nd_vc_4(%[[arg0:.*]]: memref<24x32xf16>) {
gpu.func @test_store_nd_vc_4(%src: memref<24x32xf16>) {
// CHECK: %[[C:.*]] = arith.constant dense<1.000000e+00> : vector<2xf16>
%1 = arith.constant dense<1.0>: vector<2xf16>
// CHECK: %[[R0:.*]] = xegpu.create_nd_tdesc %arg0[0, 0] : memref<24x32xf16> -> !xegpu.tensor_desc<32xf16, #xegpu.sg_map<wi_layout = [1, 16], wi_data = [1, 1]>>
%2 = xegpu.create_nd_tdesc %src[0, 0] : memref<24x32xf16> ->
!xegpu.tensor_desc<32xf16, #xegpu.sg_map<wi_layout = [1, 16], wi_data = [1, 1]>>
// CHECK: xegpu.store_nd %[[C]], %[[R0]] <{l1_hint = #xegpu.cache_hint<write_back>, l2_hint = #xegpu.cache_hint<uncached>}> : vector<2xf16>, !xegpu.tensor_desc<32xf16, #xegpu.sg_map<wi_layout = [1, 16], wi_data = [1, 1]>>
xegpu.store_nd %1, %2 <{l1_hint = #xegpu.cache_hint<write_back>, l2_hint = #xegpu.cache_hint<uncached>}>: vector<2xf16>, !xegpu.tensor_desc<32xf16, #xegpu.sg_map<wi_layout = [1, 16], wi_data = [1, 1]>>
gpu.return
}

// CHECK: gpu.func @test_create_update_nd_tdesc_vc(%[[arg0:.*]]: memref<24x32xf32>) {
gpu.func @test_create_update_nd_tdesc_vc(%src: memref<24x32xf32>) {
// CHECK: %[[REG:.*]] = xegpu.create_nd_tdesc %arg0[0, 0] : memref<24x32xf32> -> !xegpu.tensor_desc<8x16xf32>