@@ -528,6 +528,16 @@ func.func @extract_load_scalar(%arg0: memref<?xf32>, %arg1: index) -> f32 {
   return %1 : f32
 }
 
+// CHECK-LABEL: @extract_load_index
+//  CHECK-SAME:   (%[[ARG0:.*]]: memref<?xindex>, %[[ARG1:.*]]: index)
+func.func @extract_load_index(%arg0: memref<?xindex>, %arg1: index) -> index {
+// CHECK:   %[[RES:.*]] = memref.load %[[ARG0]][%[[ARG1]]] : memref<?xindex>
+// CHECK:   return %[[RES]] : index
+  %0 = vector.load %arg0[%arg1] : memref<?xindex>, vector<4xindex>
+  %1 = vector.extract %0[0] : index from vector<4xindex>
+  return %1 : index
+}
+
 // CHECK-LABEL: @extract_load_scalar_non_zero_off
 //  CHECK-SAME:   (%[[ARG0:.*]]: memref<?xf32>, %[[ARG1:.*]]: index)
 func.func @extract_load_scalar_non_zero_off(%arg0: memref<?xf32>, %arg1: index) -> f32 {
@@ -598,6 +608,18 @@ func.func @negative_extract_load_scalar_from_memref_of_vec(%arg0: memref<?xvector<4xf32>>, %arg1: index) -> f32 {
   return %1 : f32
 }
 
+// CHECK-LABEL: @negative_extract_load_scalar_from_memref_of_i1
+//  CHECK-SAME:   (%[[ARG0:.*]]: memref<?xi1>, %[[ARG1:.*]]: index)
+func.func @negative_extract_load_scalar_from_memref_of_i1(%arg0: memref<?xi1>, %arg1: index) -> i1 {
+// Subbyte types are tricky, ignore them for now.
+// CHECK:   %[[RES:.*]] = vector.load %[[ARG0]][%[[ARG1]]] : memref<?xi1>, vector<8xi1>
+// CHECK:   %[[EXT:.*]] = vector.extract %[[RES]][0] : i1 from vector<8xi1>
+// CHECK:   return %[[EXT]] : i1
+  %0 = vector.load %arg0[%arg1] : memref<?xi1>, vector<8xi1>
+  %1 = vector.extract %0[0] : i1 from vector<8xi1>
+  return %1 : i1
+}
+
 // CHECK-LABEL: @negative_extract_load_no_single_use
 //  CHECK-SAME:   (%[[ARG0:.*]]: memref<?xf32>, %[[ARG1:.*]]: index)
 func.func @negative_extract_load_no_single_use(%arg0: memref<?xf32>, %arg1: index) -> (f32, vector<4xf32>) {
0 commit comments