
Commit 1f681b5

joe-imgtru authored and committed
[CodeGen] Clear InitUndef pass new register cache between pass runs (#90967)
Multiple invocations of the pass could interfere with each other, preventing some undefs from being initialised. It proved very difficult to write a unit test for this because the failure depends on the particular allocations made for a previous function. However, the bug can be observed here: https://godbolt.org/z/7xnMo41Gv with the creation of the illegal instruction `vnsrl.wi v9, v8, 0`.
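The fix follows the usual rule for a MachineFunctionPass that keeps per-function scratch state in member containers: every such container must be reset on each run, because the pass object is reused across functions and virtual register numbers restart from zero in every MachineFunction. Below is a minimal sketch of that pattern; the pass and member names (ExamplePass, CachedRegs) are made up for illustration and are not the actual InitUndef sources.

```cpp
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/Register.h"

using namespace llvm;

namespace {
// Illustrative pass only; ExamplePass/CachedRegs are hypothetical names.
class ExamplePass : public MachineFunctionPass {
  // Scratch state that is only meaningful for the function being processed.
  SmallSet<Register, 8> CachedRegs;
  SmallVector<MachineInstr *, 8> DeadInsts;

public:
  static char ID;
  ExamplePass() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override {
    bool Changed = false;
    // ... analyse MF, create new registers, record them in CachedRegs,
    //     queue dead instructions in DeadInsts ...

    for (MachineInstr *DeadMI : DeadInsts)
      DeadMI->eraseFromParent();
    DeadInsts.clear();
    // Without this reset, register numbers cached for MF would leak into
    // the next function, where the same numbers denote unrelated registers.
    CachedRegs.clear();
    return Changed;
  }
};
} // end anonymous namespace

char ExamplePass::ID = 0;
```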
Parent: 23eadbd

File tree

4 files changed, +85 -85 lines changed


llvm/lib/CodeGen/InitUndef.cpp

Lines changed: 1 addition & 0 deletions
@@ -272,6 +272,7 @@ bool InitUndef::runOnMachineFunction(MachineFunction &MF) {
   for (auto *DeadMI : DeadInsts)
     DeadMI->eraseFromParent();
   DeadInsts.clear();
+  NewRegs.clear();
 
   return Changed;
 }
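For context on why a single stale entry matters: NewRegs is the "new register cache" from the commit title, recording the registers the pass has already created and initialised, and it is presumably consulted so the same register is not processed twice. Since virtual register numbers are per-function, an entry surviving from an earlier function can match an unrelated register in the current one and suppress the initialisation the commit message describes. The sketch below is a rough, simplified illustration of that failure mode with a hypothetical helper name, not the exact InitUndef code path.

```cpp
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/Register.h"

using namespace llvm;

// Simplified illustration of the failure mode, not the real InitUndef logic.
// If NewRegs still holds register numbers from a previously processed
// function, the count() check below can fire spuriously and the undef use
// is silently left uninitialised.
static void processUndefUse(MachineOperand &UseMO,
                            SmallSet<Register, 8> &NewRegs) {
  Register Reg = UseMO.getReg();
  if (!UseMO.isUndef() || NewRegs.count(Reg))
    return; // before the fix, a stale cache hit could land here
  // ... create a fresh register, emit its initialisation, rewrite the use,
  //     then record the new register so later uses in *this* function are
  //     not rewritten again, e.g. NewRegs.insert(NewReg); ...
}
```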

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-buildvec-of-binop.ll

Lines changed: 6 additions & 7 deletions
@@ -567,16 +567,15 @@ define <8 x i32> @add_constant_rhs_8xi32_partial(<8 x i32> %vin, i32 %a, i32 %b,
 ; CHECK-NEXT: vsetivli zero, 6, e32, m2, tu, ma
 ; CHECK-NEXT: vslideup.vi v8, v10, 5
 ; CHECK-NEXT: vmv.s.x v10, a2
-; CHECK-NEXT: lui a0, %hi(.LCPI19_0)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI19_0)
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vle32.v v12, (a0)
 ; CHECK-NEXT: vsetivli zero, 7, e32, m2, tu, ma
 ; CHECK-NEXT: vslideup.vi v8, v10, 6
-; CHECK-NEXT: vmv.s.x v10, a3
+; CHECK-NEXT: lui a0, %hi(.LCPI19_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI19_0)
 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v10, 7
-; CHECK-NEXT: vadd.vv v8, v8, v12
+; CHECK-NEXT: vle32.v v10, (a0)
+; CHECK-NEXT: vmv.s.x v12, a3
+; CHECK-NEXT: vslideup.vi v8, v12, 7
+; CHECK-NEXT: vadd.vv v8, v8, v10
 ; CHECK-NEXT: ret
   %vadd = add <8 x i32> %vin, <i32 1, i32 2, i32 3, i32 5, i32 undef, i32 undef, i32 undef, i32 undef>
   %e0 = add i32 %a, 23

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll

Lines changed: 48 additions & 48 deletions
@@ -2501,9 +2501,9 @@ define <8 x i32> @mgather_baseidx_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 x i1>
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: lw a2, 0(a2)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a2
+; RV64ZVE32F-NEXT: vmv.s.x v12, a2
 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5
 ; RV64ZVE32F-NEXT: .LBB35_9: # %else14
 ; RV64ZVE32F-NEXT: andi a2, a1, 64
 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
@@ -2546,9 +2546,9 @@ define <8 x i32> @mgather_baseidx_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 x i1>
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: lw a2, 0(a2)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a2
+; RV64ZVE32F-NEXT: vmv.s.x v12, a2
 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4
 ; RV64ZVE32F-NEXT: andi a2, a1, 32
 ; RV64ZVE32F-NEXT: bnez a2, .LBB35_8
 ; RV64ZVE32F-NEXT: j .LBB35_9
@@ -2652,9 +2652,9 @@ define <8 x i32> @mgather_baseidx_sext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: lw a2, 0(a2)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a2
+; RV64ZVE32F-NEXT: vmv.s.x v12, a2
 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5
 ; RV64ZVE32F-NEXT: .LBB36_9: # %else14
 ; RV64ZVE32F-NEXT: andi a2, a1, 64
 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
@@ -2697,9 +2697,9 @@ define <8 x i32> @mgather_baseidx_sext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: lw a2, 0(a2)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a2
+; RV64ZVE32F-NEXT: vmv.s.x v12, a2
 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4
 ; RV64ZVE32F-NEXT: andi a2, a1, 32
 ; RV64ZVE32F-NEXT: bnez a2, .LBB36_8
 ; RV64ZVE32F-NEXT: j .LBB36_9
@@ -2808,9 +2808,9 @@ define <8 x i32> @mgather_baseidx_zext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: lw a2, 0(a2)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a2
+; RV64ZVE32F-NEXT: vmv.s.x v12, a2
 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5
 ; RV64ZVE32F-NEXT: .LBB37_9: # %else14
 ; RV64ZVE32F-NEXT: andi a2, a1, 64
 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
@@ -2856,9 +2856,9 @@ define <8 x i32> @mgather_baseidx_zext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: lw a2, 0(a2)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a2
+; RV64ZVE32F-NEXT: vmv.s.x v12, a2
 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4
 ; RV64ZVE32F-NEXT: andi a2, a1, 32
 ; RV64ZVE32F-NEXT: bnez a2, .LBB37_8
 ; RV64ZVE32F-NEXT: j .LBB37_9
@@ -2966,9 +2966,9 @@ define <8 x i32> @mgather_baseidx_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <8 x i
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: lw a2, 0(a2)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a2
+; RV64ZVE32F-NEXT: vmv.s.x v12, a2
 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5
 ; RV64ZVE32F-NEXT: .LBB38_9: # %else14
 ; RV64ZVE32F-NEXT: andi a2, a1, 64
 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
@@ -3011,9 +3011,9 @@ define <8 x i32> @mgather_baseidx_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <8 x i
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: lw a2, 0(a2)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a2
+; RV64ZVE32F-NEXT: vmv.s.x v12, a2
 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4
 ; RV64ZVE32F-NEXT: andi a2, a1, 32
 ; RV64ZVE32F-NEXT: bnez a2, .LBB38_8
 ; RV64ZVE32F-NEXT: j .LBB38_9
@@ -3118,9 +3118,9 @@ define <8 x i32> @mgather_baseidx_sext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: lw a2, 0(a2)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a2
+; RV64ZVE32F-NEXT: vmv.s.x v12, a2
 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5
 ; RV64ZVE32F-NEXT: .LBB39_9: # %else14
 ; RV64ZVE32F-NEXT: andi a2, a1, 64
 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
@@ -3163,9 +3163,9 @@ define <8 x i32> @mgather_baseidx_sext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: lw a2, 0(a2)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a2
+; RV64ZVE32F-NEXT: vmv.s.x v12, a2
 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4
 ; RV64ZVE32F-NEXT: andi a2, a1, 32
 ; RV64ZVE32F-NEXT: bnez a2, .LBB39_8
 ; RV64ZVE32F-NEXT: j .LBB39_9
@@ -3275,9 +3275,9 @@ define <8 x i32> @mgather_baseidx_zext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
 ; RV64ZVE32F-NEXT: add a3, a0, a3
 ; RV64ZVE32F-NEXT: lw a3, 0(a3)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a3
+; RV64ZVE32F-NEXT: vmv.s.x v12, a3
 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5
 ; RV64ZVE32F-NEXT: .LBB40_9: # %else14
 ; RV64ZVE32F-NEXT: andi a3, a2, 64
 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
@@ -3323,9 +3323,9 @@ define <8 x i32> @mgather_baseidx_zext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
 ; RV64ZVE32F-NEXT: add a3, a0, a3
 ; RV64ZVE32F-NEXT: lw a3, 0(a3)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v8, a3
+; RV64ZVE32F-NEXT: vmv.s.x v12, a3
 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4
 ; RV64ZVE32F-NEXT: andi a3, a2, 32
 ; RV64ZVE32F-NEXT: bnez a3, .LBB40_8
 ; RV64ZVE32F-NEXT: j .LBB40_9
@@ -8200,9 +8200,9 @@ define <8 x float> @mgather_baseidx_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <8 x i
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
+; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5
 ; RV64ZVE32F-NEXT: .LBB74_9: # %else14
 ; RV64ZVE32F-NEXT: andi a2, a1, 64
 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
@@ -8245,9 +8245,9 @@ define <8 x float> @mgather_baseidx_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <8 x i
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
+; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4
 ; RV64ZVE32F-NEXT: andi a2, a1, 32
 ; RV64ZVE32F-NEXT: bnez a2, .LBB74_8
 ; RV64ZVE32F-NEXT: j .LBB74_9
@@ -8351,9 +8351,9 @@ define <8 x float> @mgather_baseidx_sext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
+; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5
 ; RV64ZVE32F-NEXT: .LBB75_9: # %else14
 ; RV64ZVE32F-NEXT: andi a2, a1, 64
 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
@@ -8396,9 +8396,9 @@ define <8 x float> @mgather_baseidx_sext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
+; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4
 ; RV64ZVE32F-NEXT: andi a2, a1, 32
 ; RV64ZVE32F-NEXT: bnez a2, .LBB75_8
 ; RV64ZVE32F-NEXT: j .LBB75_9
@@ -8507,9 +8507,9 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
+; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5
 ; RV64ZVE32F-NEXT: .LBB76_9: # %else14
 ; RV64ZVE32F-NEXT: andi a2, a1, 64
 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
@@ -8555,9 +8555,9 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
+; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4
 ; RV64ZVE32F-NEXT: andi a2, a1, 32
 ; RV64ZVE32F-NEXT: bnez a2, .LBB76_8
 ; RV64ZVE32F-NEXT: j .LBB76_9
@@ -8665,9 +8665,9 @@ define <8 x float> @mgather_baseidx_v8i16_v8f32(ptr %base, <8 x i16> %idxs, <8 x
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
+; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5
 ; RV64ZVE32F-NEXT: .LBB77_9: # %else14
 ; RV64ZVE32F-NEXT: andi a2, a1, 64
 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
@@ -8710,9 +8710,9 @@ define <8 x float> @mgather_baseidx_v8i16_v8f32(ptr %base, <8 x i16> %idxs, <8 x
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
+; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4
 ; RV64ZVE32F-NEXT: andi a2, a1, 32
 ; RV64ZVE32F-NEXT: bnez a2, .LBB77_8
 ; RV64ZVE32F-NEXT: j .LBB77_9
@@ -8817,9 +8817,9 @@ define <8 x float> @mgather_baseidx_sext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
+; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5
 ; RV64ZVE32F-NEXT: .LBB78_9: # %else14
 ; RV64ZVE32F-NEXT: andi a2, a1, 64
 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
@@ -8862,9 +8862,9 @@ define <8 x float> @mgather_baseidx_sext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
 ; RV64ZVE32F-NEXT: add a2, a0, a2
 ; RV64ZVE32F-NEXT: flw fa5, 0(a2)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
+; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4
 ; RV64ZVE32F-NEXT: andi a2, a1, 32
 ; RV64ZVE32F-NEXT: bnez a2, .LBB78_8
 ; RV64ZVE32F-NEXT: j .LBB78_9
@@ -8974,9 +8974,9 @@ define <8 x float> @mgather_baseidx_zext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
 ; RV64ZVE32F-NEXT: add a3, a0, a3
 ; RV64ZVE32F-NEXT: flw fa5, 0(a3)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
+; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5
 ; RV64ZVE32F-NEXT: .LBB79_9: # %else14
 ; RV64ZVE32F-NEXT: andi a3, a2, 64
 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
@@ -9022,9 +9022,9 @@ define <8 x float> @mgather_baseidx_zext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
 ; RV64ZVE32F-NEXT: add a3, a0, a3
 ; RV64ZVE32F-NEXT: flw fa5, 0(a3)
 ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
+; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma
-; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4
+; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4
 ; RV64ZVE32F-NEXT: andi a3, a2, 32
 ; RV64ZVE32F-NEXT: bnez a3, .LBB79_8
 ; RV64ZVE32F-NEXT: j .LBB79_9
