Commit da5b8ff

[MLIR][Affine] Fix affine data copy generate for zero-ranked memrefs
Fix affine data copy generation for zero-ranked memrefs.

Fixes: #122210 and #61167
Test cases borrowed from https://reviews.llvm.org/D147298

Lewuathe <Kai Sasaki>
Parent: ecc7e6c

File tree: 2 files changed (+94, -21 lines)
mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp

Lines changed: 29 additions & 21 deletions
@@ -1828,14 +1828,14 @@ static void getMultiLevelStrides(const MemRefRegion &region,
   }
 }
 
-/// Generates a point-wise copy from/to `memref' to/from `fastMemRef' and
-/// returns the outermost AffineForOp of the copy loop nest. `lbMaps` and
-/// `ubMaps` along with `lbOperands` and `ubOperands` hold the lower and upper
-/// bound information for the copy loop nest. `fastBufOffsets` contain the
-/// expressions to be subtracted out from the respective copy loop iterators in
-/// order to index the fast buffer. If `copyOut' is true, generates a copy-out;
-/// otherwise a copy-in. Builder `b` should be set to the point the copy nest is
-/// inserted.
+/// Generates a point-wise copy from/to a non-zero ranked `memref' to/from
+/// `fastMemRef' and returns the outermost AffineForOp of the copy loop nest.
+/// `lbMaps` and `ubMaps` along with `lbOperands` and `ubOperands` hold the
+/// lower and upper bound information for the copy loop nest. `fastBufOffsets`
+/// contain the expressions to be subtracted out from the respective copy loop
+/// iterators in order to index the fast buffer. If `copyOut' is true, generates
+/// a copy-out; otherwise a copy-in. Builder `b` should be set to the point the
+/// copy nest is inserted.
 //
 /// The copy-in nest is generated as follows as an example for a 2-d region:
 /// for x = ...
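To make the shape of the emitted IR concrete, here is a minimal MLIR sketch of a 2-d copy-in nest in the spirit of the doc comment above; the function name, memref shapes, and the constant bounds and offsets are hypothetical, chosen only for illustration and not taken from this commit:

    func.func @copy_in_sketch(%mem: memref<1024x1024xf32>, %fast: memref<64x64xf32>) {
      // Copy the 64x64 region [4, 68) x [8, 72) of %mem into the fast buffer.
      affine.for %x = 4 to 68 {
        affine.for %y = 8 to 72 {
          %v = affine.load %mem[%x, %y] : memref<1024x1024xf32>
          // The region's lower bounds (4, 8) play the role of `fastBufOffsets`:
          // they are subtracted from the loop IVs to index the fast buffer.
          affine.store %v, %fast[%x - 4, %y - 8] : memref<64x64xf32>
        }
      }
      return
    }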
@@ -1856,6 +1856,8 @@ generatePointWiseCopy(Location loc, Value memref, Value fastMemRef,
   }));
 
   unsigned rank = cast<MemRefType>(memref.getType()).getRank();
+  // A copy nest can't be generated for 0-ranked memrefs.
+  assert(rank != 0 && "non-zero rank memref expected");
   assert(lbMaps.size() == rank && "wrong number of lb maps");
   assert(ubMaps.size() == rank && "wrong number of ub maps");
 
@@ -1919,19 +1921,20 @@ emitRemarkForBlock(Block &block) {
   return block.getParentOp()->emitRemark();
 }
 
-/// Creates a buffer in the faster memory space for the specified memref region;
-/// generates a copy from the lower memory space to this one, and replaces all
-/// loads/stores in the block range [`begin', `end') of `block' to load/store
-/// from that buffer. Returns failure if copies could not be generated due to
-/// yet unimplemented cases. `copyInPlacementStart` and `copyOutPlacementStart`
-/// in copyPlacementBlock specify the insertion points where the incoming copies
-/// and outgoing copies, respectively, should be inserted (the insertion happens
-/// right before the insertion point). Since `begin` can itself be invalidated
-/// due to the memref rewriting done from this method, the output argument
-/// `nBegin` is set to its replacement (set to `begin` if no invalidation
-/// happens). Since outgoing copies could have been inserted at `end`, the
-/// output argument `nEnd` is set to the new end. `sizeInBytes` is set to the
-/// size of the fast buffer allocated.
+/// Creates a buffer in the faster memory space for the specified memref region
+/// (memref has to be non-zero ranked); generates a copy from the lower memory
+/// space to this one, and replaces all loads/stores in the block range
+/// [`begin', `end') of `block' to load/store from that buffer. Returns failure
+/// if copies could not be generated due to yet unimplemented cases.
+/// `copyInPlacementStart` and `copyOutPlacementStart` in copyPlacementBlock
+/// specify the insertion points where the incoming copies and outgoing copies,
+/// respectively, should be inserted (the insertion happens right before the
+/// insertion point). Since `begin` can itself be invalidated due to the memref
+/// rewriting done from this method, the output argument `nBegin` is set to its
+/// replacement (set to `begin` if no invalidation happens). Since outgoing
+/// copies could have been inserted at `end`, the output argument `nEnd` is set
+/// to the new end. `sizeInBytes` is set to the size of the fast buffer
+/// allocated.
 static LogicalResult generateCopy(
     const MemRefRegion &region, Block *block, Block::iterator begin,
     Block::iterator end, Block *copyPlacementBlock,
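The load/store replacement described in this doc comment can be pictured with a small sketch (hypothetical names and shapes, not taken from this commit): a fast buffer is allocated, populated by a copy-in nest, and reads inside the covered block range are redirected to it:

    // Before (sketch): the loop reads the original (slow) memref directly:
    //   %v = affine.load %arg[%i] : memref<256xf32>
    // After (sketch): reads go through a fast buffer populated up front.
    func.func @after_sketch(%arg: memref<256xf32>) {
      %fast = memref.alloc() : memref<256xf32>
      // Copy-in nest filling the fast buffer.
      affine.for %j = 0 to 256 {
        %t = affine.load %arg[%j] : memref<256xf32>
        affine.store %t, %fast[%j] : memref<256xf32>
      }
      // The original access, rewritten to use the fast buffer.
      affine.for %i = 0 to 256 {
        %v = affine.load %fast[%i] : memref<256xf32>
      }
      memref.dealloc %fast : memref<256xf32>
      return
    }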
@@ -1982,6 +1985,11 @@ static LogicalResult generateCopy(
   SmallVector<Value, 4> bufIndices;
 
   unsigned rank = memRefType.getRank();
+  if (rank == 0) {
+    LLVM_DEBUG(llvm::dbgs() << "Only non-zero ranked memrefs are supported\n");
+    return failure();
+  }
+
   SmallVector<int64_t, 4> fastBufferShape;
 
   // Compute the extents of the buffer.
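With this guard in place, an input like the following minimal hypothetical example makes generateCopy return failure cleanly, leaving the 0-d memref access untouched rather than asserting deeper inside the copy-nest generation:

    func.func @zero_rank_sketch() {
      %false = arith.constant false
      // A 0-d memref has no loop dimensions to build a copy nest over,
      // so the pass now skips it instead of crashing.
      %m = memref.alloc() : memref<i1>
      affine.store %false, %m[] : memref<i1>
      return
    }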

mlir/test/Dialect/Affine/affine-data-copy.mlir

Lines changed: 65 additions & 0 deletions
@@ -354,3 +354,68 @@ func.func @arbitrary_memory_space() {
   }
   return
 }
+
+// CHECK-LABEL: zero_ranked
+func.func @zero_ranked(%3: memref<480xi1>) {
+  %false = arith.constant false
+  %4 = memref.alloc() {alignment = 128 : i64} : memref<i1>
+  affine.store %false, %4[] : memref<i1>
+  %5 = memref.alloc() {alignment = 128 : i64} : memref<i1>
+  memref.copy %4, %5 : memref<i1> to memref<i1>
+  affine.for %arg0 = 0 to 480 {
+    %11 = affine.load %3[%arg0] : memref<480xi1>
+    %12 = affine.load %5[] : memref<i1>
+    %13 = arith.cmpi slt, %11, %12 : i1
+    %14 = arith.select %13, %11, %12 : i1
+    affine.store %14, %5[] : memref<i1>
+  }
+  return
+}
+
+// CHECK-LABEL: func @scalar_memref_copy_without_dma
+func.func @scalar_memref_copy_without_dma() {
+  %false = arith.constant false
+  %4 = memref.alloc() {alignment = 128 : i64} : memref<i1>
+  affine.store %false, %4[] : memref<i1>
+
+  // CHECK: %[[FALSE:.*]] = arith.constant false
+  // CHECK: %[[MEMREF:.*]] = memref.alloc() {alignment = 128 : i64} : memref<i1>
+  // CHECK: affine.store %[[FALSE]], %[[MEMREF]][] : memref<i1>
+  return
+}
+
+// CHECK-LABEL: func @scalar_memref_copy_in_loop
+func.func @scalar_memref_copy_in_loop(%3: memref<480xi1>) {
+  %false = arith.constant false
+  %4 = memref.alloc() {alignment = 128 : i64} : memref<i1>
+  affine.store %false, %4[] : memref<i1>
+  %5 = memref.alloc() {alignment = 128 : i64} : memref<i1>
+  memref.copy %4, %5 : memref<i1> to memref<i1>
+  affine.for %arg0 = 0 to 480 {
+    %11 = affine.load %3[%arg0] : memref<480xi1>
+    %12 = affine.load %5[] : memref<i1>
+    %13 = arith.cmpi slt, %11, %12 : i1
+    %14 = arith.select %13, %11, %12 : i1
+    affine.store %14, %5[] : memref<i1>
+  }
+
+  // CHECK: %[[FALSE:.*]] = arith.constant false
+  // CHECK: %[[MEMREF:.*]] = memref.alloc() {alignment = 128 : i64} : memref<i1>
+  // CHECK: affine.store %[[FALSE]], %[[MEMREF]][] : memref<i1>
+  // CHECK: %[[TARGET:.*]] = memref.alloc() {alignment = 128 : i64} : memref<i1>
+  // CHECK: memref.copy %alloc, %[[TARGET]] : memref<i1> to memref<i1>
+  // CHECK: %[[FAST_MEMREF:.*]] = memref.alloc() : memref<480xi1>
+  // CHECK: affine.for %{{.*}} = 0 to 480 {
+  // CHECK:   %{{.*}} = affine.load %arg0[%{{.*}}] : memref<480xi1>
+  // CHECK:   affine.store %{{.*}}, %[[FAST_MEMREF]][%{{.*}}] : memref<480xi1>
+  // CHECK: }
+  // CHECK: affine.for %arg1 = 0 to 480 {
+  // CHECK:   %[[L0:.*]] = affine.load %[[FAST_MEMREF]][%arg1] : memref<480xi1>
+  // CHECK:   %[[L1:.*]] = affine.load %[[TARGET]][] : memref<i1>
+  // CHECK:   %[[CMPI:.*]] = arith.cmpi slt, %[[L0]], %[[L1]] : i1
+  // CHECK:   %[[SELECT:.*]] = arith.select %[[CMPI]], %[[L0]], %[[L1]] : i1
+  // CHECK:   affine.store %[[SELECT]], %[[TARGET]][] : memref<i1>
+  // CHECK: }
+  // CHECK: memref.dealloc %[[FAST_MEMREF]] : memref<480xi1>
+  return
+}
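As a usage note, FileCheck tests in this file are driven by the file's RUN line; the sketch below shows roughly how such an invocation could look (the exact pass options in affine-data-copy.mlir may differ from this assumption):

    // RUN: mlir-opt %s -affine-data-copy-generate="generate-dma=false fast-mem-space=0" | FileCheck %s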
