@@ -129,9 +129,8 @@ define <vscale x 8 x i8> @strided_vpload_nxv8i8(ptr %ptr, i32 signext %stride, <
 define <vscale x 8 x i8> @strided_vpload_nxv8i8_unit_stride(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpload_nxv8i8_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 1
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vlse8.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vle8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   %load = call <vscale x 8 x i8> @llvm.experimental.vp.strided.load.nxv8i8.p0.i32(ptr %ptr, i32 1, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i8> %load
@@ -200,9 +199,8 @@ define <vscale x 4 x i16> @strided_vpload_nxv4i16(ptr %ptr, i32 signext %stride,
 define <vscale x 4 x i16> @strided_vpload_nxv4i16_unit_stride(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpload_nxv4i16_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 2
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vlse16.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i16> @llvm.experimental.vp.strided.load.nxv4i16.p0.i32(ptr %ptr, i32 2, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i16> %load
@@ -247,9 +245,8 @@ define <vscale x 2 x i32> @strided_vpload_nxv2i32(ptr %ptr, i32 signext %stride,
 define <vscale x 2 x i32> @strided_vpload_nxv2i32_unit_stride(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpload_nxv2i32_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 4
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vlse32.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i32> @llvm.experimental.vp.strided.load.nxv2i32.p0.i32(ptr %ptr, i32 4, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i32> %load
@@ -306,9 +303,8 @@ define <vscale x 1 x i64> @strided_vpload_nxv1i64(ptr %ptr, i32 signext %stride,
 define <vscale x 1 x i64> @strided_vpload_nxv1i64_unit_stride(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpload_nxv1i64_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   %load = call <vscale x 1 x i64> @llvm.experimental.vp.strided.load.nxv1i64.p0.i32(ptr %ptr, i32 8, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i64> %load
@@ -413,9 +409,8 @@ define <vscale x 4 x half> @strided_vpload_nxv4f16(ptr %ptr, i32 signext %stride
 define <vscale x 4 x half> @strided_vpload_nxv4f16_unit_stride(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpload_nxv4f16_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 2
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vlse16.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x half> @llvm.experimental.vp.strided.load.nxv4f16.p0.i32(ptr %ptr, i32 2, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x half> %load
@@ -460,9 +455,8 @@ define <vscale x 2 x float> @strided_vpload_nxv2f32(ptr %ptr, i32 signext %strid
 define <vscale x 2 x float> @strided_vpload_nxv2f32_unit_stride(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpload_nxv2f32_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 4
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vlse32.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x float> @llvm.experimental.vp.strided.load.nxv2f32.p0.i32(ptr %ptr, i32 4, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x float> %load
@@ -519,9 +513,8 @@ define <vscale x 1 x double> @strided_vpload_nxv1f64(ptr %ptr, i32 signext %stri
 define <vscale x 1 x double> @strided_vpload_nxv1f64_unit_stride(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpload_nxv1f64_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   %load = call <vscale x 1 x double> @llvm.experimental.vp.strided.load.nxv1f64.p0.i32(ptr %ptr, i32 8, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x double> %load
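For context, the pattern these updated checks exercise is that an @llvm.experimental.vp.strided.load whose byte stride equals the element size accesses consecutive elements, so it can be selected as a unit-stride vle* rather than a strided vlse*. Below is a minimal standalone IR sketch of the two equivalent forms, assuming the standard VP intrinsic signatures; the function names @strided_unit and @plain_vp_load are illustrative and not part of the test file.

declare <vscale x 8 x i8> @llvm.experimental.vp.strided.load.nxv8i8.p0.i32(ptr, i32, <vscale x 8 x i1>, i32)
declare <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0(ptr, <vscale x 8 x i1>, i32)

; Stride of 1 byte equals sizeof(i8), so this reads the same memory as a plain VP load.
define <vscale x 8 x i8> @strided_unit(ptr %p, <vscale x 8 x i1> %m, i32 zeroext %evl) {
  %v = call <vscale x 8 x i8> @llvm.experimental.vp.strided.load.nxv8i8.p0.i32(ptr %p, i32 1, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}

; Equivalent unit-stride form, which lowers to a masked vle8.v on RISC-V with the V extension.
define <vscale x 8 x i8> @plain_vp_load(ptr %p, <vscale x 8 x i1> %m, i32 zeroext %evl) {
  %v = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0(ptr %p, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}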