@@ -1,4 +1,3 @@
- // NOTE: Assertions have been autogenerated by utils/generate-test-checks.py
// RUN: mlir-opt %s -sparsification | FileCheck %s

#SpVec = #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed) }>
@@ -17,9 +16,9 @@
}

// CHECK-LABEL: func @mul_inv_dense1d(
- // CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>>,
- // CHECK-SAME: %[[VAL_1:.*]]: tensor<4xf32>,
- // CHECK-SAME: %[[VAL_2:.*]]: tensor<32xf32>) -> tensor<32xf32> {
+ // CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>>,
+ // CHECK-SAME: %[[VAL_1:.*]]: tensor<4xf32>,
+ // CHECK-SAME: %[[VAL_2:.*]]: tensor<32xf32>) -> tensor<32xf32> {
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 3 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
@@ -57,13 +56,13 @@ func.func @mul_inv_dense1d(%arga: tensor<32xf32, #SpVec>,
}

// CHECK-LABEL: func.func @mul_inv_sparse1d(
- // CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>>,
- // CHECK-SAME: %[[VAL_1:.*]]: tensor<4xf32, #sparse_tensor.encoding<{{{.*}}}>>)
+ // CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>>,
+ // CHECK-SAME: %[[VAL_1:.*]]: tensor<4xf32, #sparse_tensor.encoding<{{{.*}}}>>)
// CHECK: %[[VAL_2:.*]] = arith.constant 0 : index
// CHECK: %[[VAL_3:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_4:.*]] = arith.constant 3 : index
// CHECK: %[[VAL_5:.*]] = arith.constant 0.000000e+00 : f32
- // CHECK: %[[VAL_6:.*]] = bufferization.alloc_tensor() : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>>
+ // CHECK: %[[VAL_6:.*]] = tensor.empty() : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf32>
// CHECK: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<4xf32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
@@ -95,7 +94,7 @@ func.func @mul_inv_dense1d(%arga: tensor<32xf32, #SpVec>,
// CHECK: return %[[VAL_32]] : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>>
func.func @mul_inv_sparse1d(%arga: tensor<32xf32, #SpVec>,
                            %argb: tensor<4xf32, #SpVec>) -> tensor<32xf32, #SpVec> {
- %argx = bufferization.alloc_tensor() : tensor<32xf32, #SpVec>
+ %argx = tensor.empty() : tensor<32xf32, #SpVec>
  %0 = linalg.generic #trait1
    ins(%arga, %argb: tensor<32xf32, #SpVec>, tensor<4xf32, #SpVec>)
    outs(%argx: tensor<32xf32, #SpVec>) {
@@ -109,13 +108,13 @@ func.func @mul_inv_sparse1d(%arga: tensor<32xf32, #SpVec>,


// CHECK-LABEL: func.func @mul_inv_enc_dense1d(
- // CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>>,
- // CHECK-SAME: %[[VAL_1:.*]]: tensor<4xf32, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>> {
+ // CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>>,
+ // CHECK-SAME: %[[VAL_1:.*]]: tensor<4xf32, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>> {
// CHECK: %[[VAL_2:.*]] = arith.constant 32 : index
// CHECK: %[[VAL_3:.*]] = arith.constant 3 : index
// CHECK: %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK: %[[VAL_5:.*]] = arith.constant 1 : index
- // CHECK: %[[VAL_6:.*]] = bufferization.alloc_tensor() : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>>
+ // CHECK: %[[VAL_6:.*]] = tensor.empty() : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf32>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<4xf32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf32>
// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_6]] : tensor<32xf32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf32>
@@ -132,7 +131,7 @@ func.func @mul_inv_sparse1d(%arga: tensor<32xf32, #SpVec>,
// CHECK: }
func.func @mul_inv_enc_dense1d(%arga: tensor<32xf32, #EncDenseVec>,
                               %argb: tensor<4xf32, #EncDenseVec>) -> tensor<32xf32, #EncDenseVec> {
- %argx = bufferization.alloc_tensor() : tensor<32xf32, #EncDenseVec>
+ %argx = tensor.empty() : tensor<32xf32, #EncDenseVec>
  %0 = linalg.generic #trait1
    ins(%arga, %argb: tensor<32xf32, #EncDenseVec>, tensor<4xf32, #EncDenseVec>)
    outs(%argx: tensor<32xf32, #EncDenseVec>) {
@@ -155,9 +154,9 @@ func.func @mul_inv_enc_dense1d(%arga: tensor<32xf32, #EncDenseVec>,
}

// CHECK-LABEL: func @and_affine_dense1d(
- // CHECK-SAME: %[[VAL_0:.*]]: tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>>,
- // CHECK-SAME: %[[VAL_1:.*]]: tensor<34xi32>,
- // CHECK-SAME: %[[VAL_2:.*]]: tensor<32xi32>) -> tensor<32xi32> {
+ // CHECK-SAME: %[[VAL_0:.*]]: tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>>,
+ // CHECK-SAME: %[[VAL_1:.*]]: tensor<34xi32>,
+ // CHECK-SAME: %[[VAL_2:.*]]: tensor<32xi32>) -> tensor<32xi32> {
// CHECK-DAG: %[[ZERO:.*]] = arith.constant 0 : i32
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index
@@ -195,12 +194,12 @@ func.func @and_affine_dense1d(%arga: tensor<32xi32, #SpVec>,
}

// CHECK-LABEL: func.func @and_affine_sparse1d(
- // CHECK-SAME: %[[VAL_0:.*]]: tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>>,
- // CHECK-SAME: %[[VAL_1:.*]]: tensor<34xi32, #sparse_tensor.encoding<{{{.*}}}>>)
+ // CHECK-SAME: %[[VAL_0:.*]]: tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>>,
+ // CHECK-SAME: %[[VAL_1:.*]]: tensor<34xi32, #sparse_tensor.encoding<{{{.*}}}>>)
// CHECK: %[[VAL_2:.*]] = arith.constant 0 : index
// CHECK: %[[VAL_3:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_4:.*]] = arith.constant 2 : index
- // CHECK: %[[VAL_5:.*]] = bufferization.alloc_tensor() : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>>
+ // CHECK: %[[VAL_5:.*]] = tensor.empty() : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xi32>
@@ -234,7 +233,7 @@ func.func @and_affine_dense1d(%arga: tensor<32xi32, #SpVec>,
// CHECK: return %[[VAL_33]] : tensor<32xi32, #sparse_tensor.encoding<{{{.*}}}>>
func.func @and_affine_sparse1d(%arga: tensor<32xi32, #SpVec>,
                               %argb: tensor<34xi32, #SpVec>) -> tensor<32xi32, #SpVec> {
- %argx = bufferization.alloc_tensor() : tensor<32xi32, #SpVec>
+ %argx = tensor.empty() : tensor<32xi32, #SpVec>
  %0 = linalg.generic #trait2
    ins(%arga, %argb: tensor<32xi32, #SpVec>, tensor<34xi32, #SpVec>)
    outs(%argx: tensor<32xi32, #SpVec>) {
@@ -256,9 +255,9 @@ func.func @and_affine_sparse1d(%arga: tensor<32xi32, #SpVec>,
}

// CHECK-LABEL: func @mul_affine_dense2d(
- // CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>>,
- // CHECK-SAME: %[[VAL_1:.*]]: tensor<34x19xf64>,
- // CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16xf64>) -> tensor<32x16xf64> {
+ // CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>>,
+ // CHECK-SAME: %[[VAL_1:.*]]: tensor<34x19xf64>,
+ // CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16xf64>) -> tensor<32x16xf64> {
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 32 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index
@@ -304,8 +303,8 @@ func.func @mul_affine_dense2d(%arga: tensor<32x16xf64, #CSR>,


// CHECK-LABEL: func.func @mul_affine_sparse2d(
- // CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>>,
- // CHECK-SAME: %[[VAL_1:.*]]: tensor<34x19xf64, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>> {
+ // CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>>,
+ // CHECK-SAME: %[[VAL_1:.*]]: tensor<34x19xf64, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>> {
// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 32 : index
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index
@@ -314,7 +313,7 @@ func.func @mul_affine_dense2d(%arga: tensor<32x16xf64, #CSR>,
// CHECK-DAG: %[[VAL_7:.*]] = arith.constant 3 : index
// CHECK-DAG: %[[VAL_TRUE:.*]] = arith.constant true
// CHECK-DAG: %[[VAL_FALSE:.*]] = arith.constant false
- // CHECK: %[[VAL_8:.*]] = bufferization.alloc_tensor() : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>>
+ // CHECK: %[[VAL_8:.*]] = tensor.empty() : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>>
// CHECK: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex>
// CHECK: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf64>
@@ -360,7 +359,7 @@ func.func @mul_affine_dense2d(%arga: tensor<32x16xf64, #CSR>,
// CHECK: return %[[VAL_45]] : tensor<32x16xf64, #sparse_tensor.encoding<{{{.*}}}>>
func.func @mul_affine_sparse2d(%arga: tensor<32x16xf64, #CSR>,
                               %argb: tensor<34x19xf64, #CSR>) -> tensor<32x16xf64, #CSR> {
- %argx = bufferization.alloc_tensor() : tensor<32x16xf64, #CSR>
+ %argx = tensor.empty() : tensor<32x16xf64, #CSR>
  %0 = linalg.generic #trait3
    ins(%arga, %argb: tensor<32x16xf64, #CSR>, tensor<34x19xf64, #CSR>)
    outs(%argx: tensor<32x16xf64, #CSR>) {
@@ -383,9 +382,9 @@ func.func @mul_affine_sparse2d(%arga: tensor<32x16xf64, #CSR>,
}

// CHECK-LABEL: func.func @mul_affine_dense_dim_2d(
- // CHECK-SAME: %[[VAL_0:.*]]: tensor<34x16xf64, #sparse_tensor.encoding
- // CHECK-SAME: %[[VAL_1:.*]]: tensor<32x19xf64, #sparse_tensor.encoding<{{{.*}}}>>,
- // CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16xf64>) -> tensor<32x16xf64> {
+ // CHECK-SAME: %[[VAL_0:.*]]: tensor<34x16xf64, #sparse_tensor.encoding
+ // CHECK-SAME: %[[VAL_1:.*]]: tensor<32x19xf64, #sparse_tensor.encoding<{{{.*}}}>>,
+ // CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16xf64>) -> tensor<32x16xf64> {
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 19 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
@@ -447,9 +446,9 @@ func.func @mul_affine_dense_dim_2d(%arga: tensor<34x16xf64, #CSR>,
}

// CHECK-LABEL: func.func @mul_const_affine_dense_dim_2d(
- // CHECK-SAME: %[[VAL_0:.*]]: tensor<34x16xf64,
- // CHECK-SAME: %[[VAL_1:.*]]: tensor<32x19xf64, #sparse_tensor.encoding<{{{.*}}}>>,
- // CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16xf64>) -> tensor<32x16xf64> {
+ // CHECK-SAME: %[[VAL_0:.*]]: tensor<34x16xf64,
+ // CHECK-SAME: %[[VAL_1:.*]]: tensor<32x19xf64, #sparse_tensor.encoding<{{{.*}}}>>,
+ // CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16xf64>) -> tensor<32x16xf64> {
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 19 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 2 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index