Integrates LLVM @ a758bcdbd92efb64a3482eb95d2769d74e33f5bb (#18783)
No local patch is carried.

---------

Signed-off-by: yzhang93 <zhyuhang88@gmail.com>
yzhang93 authored Oct 16, 2024
1 parent 2945399 · commit f9fa934
Showing 5 changed files with 10 additions and 10 deletions.
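
The change itself is mechanical: this LLVM revision renames the pack_paddings option of the structured padding transform to nofold_flags (the flags that mark the generated pad ops as nofold so they are not folded away), and this integrate updates IREE's pass code and transform-dialect test expectations to match. As a minimal sketch of the op form the updated tests expect, assuming a %matmul handle and reusing the sizes from the matmul_1 test below (illustrative, not a verbatim excerpt from this commit):

  // Illustrative only: transform.structured.pad spelled with the renamed
  // nofold_flags attribute (formerly pack_paddings); %matmul is an assumed
  // handle to a previously tiled linalg op.
  %padded, %pad, %copy = transform.structured.pad %matmul
      pad_to_multiple_of [1, 1, 1]
      {copy_back_op = "none", nofold_flags = [1, 1, 1],
       padding_dimensions = [0, 1, 2],
       padding_values = [0.000000e+00 : f32, 0.000000e+00 : f32, 0.000000e+00 : f32]}
      : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)

Only the attribute and variable names change; the semantics of the flags are unchanged.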
@@ -45,7 +45,7 @@ class LLVMGPUPromoteMatmulToFitMMAPass final
OpBuilder::InsertionGuard guard(rewriter);
rewriter.setInsertionPointAfter(op);

- SmallVector<bool> packPaddings(op.getNumDpsInputs(), noFold);
+ SmallVector<bool> nofoldFlags(op.getNumDpsInputs(), noFold);

SmallVector<Attribute> paddingValueAttributes;
for (auto &operand : op->getOpOperands()) {
@@ -58,7 +58,7 @@ class LLVMGPUPromoteMatmulToFitMMAPass final
.setPaddingDimensions(paddingDims)
.setPaddingValues(paddingValueAttributes)
.setPadToMultipleOf(padToMultipleOf)
- .setPackPaddings(packPaddings)
+ .setPackPaddings(nofoldFlags)
.setCopyBackOp(linalg::LinalgPaddingOptions::CopyBackOp::None);

FailureOr<linalg::LinalgOp> result =
@@ -57,7 +57,7 @@ func.func @batch_matmul_dispatch_0_generic_128x80x320x32_f32() {
// DEFAULT: [0, 0, 0, 16]
// OPTIONS: [0, 0, 0, 8]
// CHECK: %[[PADDED:.+]], %{{.*}}, %{{.+}} = transform.structured.pad %tiled_linalg_op pad_to_multiple_of [1, 1, 1, 1]
- // CHECK: pack_paddings = [1, 1, 1, 1], padding_dimensions = [0, 1, 2, 3]
+ // CHECK: nofold_flags = [1, 1, 1, 1], padding_dimensions = [0, 1, 2, 3]
// CHECK: padding_values = [0.000000e+00 : f32, 0.000000e+00 : f32, 0.000000e+00 : f32]}
// CHECK: %[[V3:.+]] = transform.get_producer_of_operand %[[PADDED]][2]
// CHECK: transform.structured.hoist_pad %{{.*}} by 1 loops
@@ -36,7 +36,7 @@ func.func @nchw_convolution() {
// CHECK: transform.structured.fuse_into_containing_op
// CHECK: transform.structured.tile_using_for %{{.*}}[0, 0, 0, 16]
// CHECK: transform.structured.fuse_into_containing_op
- // CHECK: transform.structured.pad %{{.*}} pad_to_multiple_of [1, 1, 1, 1] {copy_back_op = "none", pack_paddings = [1, 0, 1], padding_dimensions = [0, 1, 2, 3], padding_values = [0.000000e+00 : f32, 0.000000e+00 : f32, 0.000000e+00 : f32]}
+ // CHECK: transform.structured.pad %{{.*}} pad_to_multiple_of [1, 1, 1, 1] {copy_back_op = "none", nofold_flags = [1, 0, 1], padding_dimensions = [0, 1, 2, 3], padding_values = [0.000000e+00 : f32, 0.000000e+00 : f32, 0.000000e+00 : f32]}
// CHECK: transform.structured.match ops{["linalg.fill"]}
// CHECK: %[[RES:.+]] = transform.get_producer_of_operand %{{.*}}[2]
// CHECK: transform.structured.rewrite_in_destination_passing_style %[[RES]]
@@ -89,7 +89,7 @@ func.func @nhwc_convolution() {

// CHECK: transform.named_sequence
// CHECK: transform.structured.tile_using_forall %{{.*}} tile_sizes [1, 128, 128](mapping = [#gpu.block<z>, #gpu.block<y>, #gpu.block<x>])
- // CHECK: transform.structured.pad %{{.*}} pad_to_multiple_of [1, 1, 1, 1] {copy_back_op = "none", pack_paddings = [0, 1, 1], padding_dimensions = [0, 1, 2, 3], padding_values = [0.000000e+00 : f32, 0.000000e+00 : f32, 0.000000e+00 : f32]}
+ // CHECK: transform.structured.pad %{{.*}} pad_to_multiple_of [1, 1, 1, 1] {copy_back_op = "none", nofold_flags = [0, 1, 1], padding_dimensions = [0, 1, 2, 3], padding_values = [0.000000e+00 : f32, 0.000000e+00 : f32, 0.000000e+00 : f32]}
// CHECK: %[[RES:.+]] = transform.get_producer_of_operand %{{.*}}[2]
// CHECK: transform.structured.rewrite_in_destination_passing_style %[[RES]]
// CHECK: %[[LHS:.+]] = transform.get_producer_of_operand %{{.*}}[0]
@@ -71,7 +71,7 @@ func.func @matmul_1() {
// CHECK: transform.structured.fuse_into_containing_op
// CHECK: transform.iree.populate_workgroup_count_region_using_num_threads_slice
// CHECK: transform.structured.tile_using_for %{{.*}}[0, 0, 16]
- // CHECK: transform.structured.pad %{{.*}} pad_to_multiple_of [1, 1, 1] {copy_back_op = "none", pack_paddings = [1, 1, 1], padding_dimensions = [0, 1, 2], padding_values = [0.000000e+00 : f32, 0.000000e+00 : f32, 0.000000e+00 : f32]}
+ // CHECK: transform.structured.pad %{{.*}} pad_to_multiple_of [1, 1, 1] {copy_back_op = "none", nofold_flags = [1, 1, 1], padding_dimensions = [0, 1, 2], padding_values = [0.000000e+00 : f32, 0.000000e+00 : f32, 0.000000e+00 : f32]}
// CHECK: transform.structured.hoist_pad %{{.}} by 1 loops
// CHECK: transform.structured.insert_slice_to_copy %{{.*}} : (!transform.any_op) -> !transform.any_op
// CHECK: transform.structured.tile_using_forall %{{.*}} num_threads [32, 4](mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>])
@@ -133,7 +133,7 @@ func.func @matmul_1()
// WITH_OPTIONS: transform.iree.populate_workgroup_count_region_using_num_threads_slice
// The tiling is affected by td-matmul-strategy-reduc-size: 8.
// WITH_OPTIONS: transform.structured.tile_using_for %{{.*}}[0, 0, 8]
- // WITH_OPTIONS: transform.structured.pad %{{.*}} pad_to_multiple_of [1, 1, 1] {copy_back_op = "none", pack_paddings = [1, 1, 1], padding_dimensions = [0, 1, 2], padding_values = [0.000000e+00 : f32, 0.000000e+00 : f32, 0.000000e+00 : f32]}
+ // WITH_OPTIONS: transform.structured.pad %{{.*}} pad_to_multiple_of [1, 1, 1] {copy_back_op = "none", nofold_flags = [1, 1, 1], padding_dimensions = [0, 1, 2], padding_values = [0.000000e+00 : f32, 0.000000e+00 : f32, 0.000000e+00 : f32]}
// WITH_OPTIONS: transform.structured.hoist_pad %{{.}} by 1 loops
// WITH_OPTIONS: transform.structured.insert_slice_to_copy %{{.*}} : (!transform.any_op) -> !transform.any_op
// WITH_OPTIONS: transform.structured.tile_using_forall %{{.*}} num_threads [64, 2](mapping = [#gpu.thread<linear_dim_1>, #gpu.thread<linear_dim_0>])
@@ -308,7 +308,7 @@ func.func @matmul_4_partially_unaligned() {
// Make sure we do not canonicalize because the result is still aligned.
// CHECK-NEXT: transform.structured.pad %tiled_linalg_op
// CHECK-SAME: copy_back_op = "none"
- // CHECK-SAME: pack_paddings = [1, 1, 1]
+ // CHECK-SAME: nofold_flags = [1, 1, 1]
// CHECK-SAME: padding_dimensions = [0, 1, 2]
// CHECK-SAME: padding_values = [0.000000e+00 : f32, 0.000000e+00 : f32, 0.000000e+00 : f32]
// CHECK: apply_patterns to %{{.*}} {
@@ -375,7 +375,7 @@ func.func @aligned_matmul() {
// Make sure we do not canonicalize if the result is aligned to avoid folding the extract_slice on the iterator.
// CHECK-NEXT: transform.structured.pad %tiled_linalg_op
// CHECK-SAME: copy_back_op = "none"
- // CHECK-SAME: pack_paddings = [1, 1, 1]
+ // CHECK-SAME: nofold_flags = [1, 1, 1]
// CHECK-SAME: padding_dimensions = [0, 1, 2]
// CHECK-SAME: padding_values = [0.000000e+00 : f32, 0.000000e+00 : f32, 0.000000e+00 : f32]

third_party/llvm-project: 1 addition & 1 deletion
Submodule llvm-project updated 1181 files
