Skip to content

Commit 128c6ed

Browse files
committed
[RISCV] Teach VSETVLIInsert to eliminate redundant vsetvli for vmv.s.x and vfmv.s.f.
Differential Revision: https://reviews.llvm.org/D116307
1 parent 550d90e commit 128c6ed

File tree

7 files changed

+107
-56
lines changed

7 files changed

+107
-56
lines changed

llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp

Lines changed: 89 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -59,12 +59,13 @@ class VSETVLIInfo {
5959
uint8_t MaskAgnostic : 1;
6060
uint8_t MaskRegOp : 1;
6161
uint8_t StoreOp : 1;
62+
uint8_t ScalarMovOp : 1;
6263
uint8_t SEWLMULRatioOnly : 1;
6364

6465
public:
6566
VSETVLIInfo()
6667
: AVLImm(0), TailAgnostic(false), MaskAgnostic(false), MaskRegOp(false),
67-
StoreOp(false), SEWLMULRatioOnly(false) {}
68+
StoreOp(false), ScalarMovOp(false), SEWLMULRatioOnly(false) {}
6869

6970
static VSETVLIInfo getUnknown() {
7071
VSETVLIInfo Info;
@@ -96,6 +97,18 @@ class VSETVLIInfo {
9697
assert(hasAVLImm());
9798
return AVLImm;
9899
}
100+
bool hasZeroAVL() const {
101+
if (hasAVLImm())
102+
return getAVLImm() == 0;
103+
return false;
104+
}
105+
bool hasNonZeroAVL() const {
106+
if (hasAVLImm())
107+
return getAVLImm() > 0;
108+
if (hasAVLReg())
109+
return getAVLReg() == RISCV::X0;
110+
return false;
111+
}
99112

100113
bool hasSameAVL(const VSETVLIInfo &Other) const {
101114
assert(isValid() && Other.isValid() &&
@@ -120,7 +133,7 @@ class VSETVLIInfo {
120133
MaskAgnostic = RISCVVType::isMaskAgnostic(VType);
121134
}
122135
void setVTYPE(RISCVII::VLMUL L, unsigned S, bool TA, bool MA, bool MRO,
123-
bool IsStore) {
136+
bool IsStore, bool IsScalarMovOp) {
124137
assert(isValid() && !isUnknown() &&
125138
"Can't set VTYPE for uninitialized or unknown");
126139
VLMul = L;
@@ -129,6 +142,7 @@ class VSETVLIInfo {
129142
MaskAgnostic = MA;
130143
MaskRegOp = MRO;
131144
StoreOp = IsStore;
145+
ScalarMovOp = IsScalarMovOp;
132146
}
133147

134148
unsigned encodeVTYPE() const {
@@ -139,6 +153,16 @@ class VSETVLIInfo {
139153

140154
bool hasSEWLMULRatioOnly() const { return SEWLMULRatioOnly; }
141155

156+
bool hasSameSEW(const VSETVLIInfo &Other) const {
157+
assert(isValid() && Other.isValid() &&
158+
"Can't compare invalid VSETVLIInfos");
159+
assert(!isUnknown() && !Other.isUnknown() &&
160+
"Can't compare VTYPE in unknown state");
161+
assert(!SEWLMULRatioOnly && !Other.SEWLMULRatioOnly &&
162+
"Can't compare when only LMUL/SEW ratio is valid.");
163+
return SEW == Other.SEW;
164+
}
165+
142166
bool hasSameVTYPE(const VSETVLIInfo &Other) const {
143167
assert(isValid() && Other.isValid() &&
144168
"Can't compare invalid VSETVLIInfos");
@@ -178,6 +202,15 @@ class VSETVLIInfo {
178202
return getSEWLMULRatio() == Other.getSEWLMULRatio();
179203
}
180204

205+
bool hasSamePolicy(const VSETVLIInfo &Other) const {
206+
assert(isValid() && Other.isValid() &&
207+
"Can't compare invalid VSETVLIInfos");
208+
assert(!isUnknown() && !Other.isUnknown() &&
209+
"Can't compare VTYPE in unknown state");
210+
return TailAgnostic == Other.TailAgnostic &&
211+
MaskAgnostic == Other.MaskAgnostic;
212+
}
213+
181214
bool hasCompatibleVTYPE(const VSETVLIInfo &InstrInfo, bool Strict) const {
182215
// Simple case, see if full VTYPE matches.
183216
if (hasSameVTYPE(InstrInfo))
@@ -222,6 +255,15 @@ class VSETVLIInfo {
222255
return true;
223256
}
224257

258+
// For vmv.s.x and vfmv.s.f, there are only two behaviors: VL = 0 and VL > 0.
259+
// So they are compatible when we can make sure that both VLs are in the same
260+
// situation.
261+
if (!Strict && InstrInfo.ScalarMovOp && InstrInfo.hasAVLImm() &&
262+
((hasNonZeroAVL() && InstrInfo.hasNonZeroAVL()) ||
263+
(hasZeroAVL() && InstrInfo.hasZeroAVL())) &&
264+
hasSameSEW(InstrInfo) && hasSamePolicy(InstrInfo))
265+
return true;
266+
225267
// The AVL must match.
226268
if (!hasSameAVL(InstrInfo))
227269
return false;
@@ -414,6 +456,42 @@ static MachineInstr *elideCopies(MachineInstr *MI,
414456
}
415457
}
416458

459+
static bool isScalarMoveInstr(const MachineInstr &MI) {
460+
switch (MI.getOpcode()) {
461+
default:
462+
return false;
463+
case RISCV::PseudoVMV_S_X_M1:
464+
case RISCV::PseudoVMV_S_X_M2:
465+
case RISCV::PseudoVMV_S_X_M4:
466+
case RISCV::PseudoVMV_S_X_M8:
467+
case RISCV::PseudoVMV_S_X_MF2:
468+
case RISCV::PseudoVMV_S_X_MF4:
469+
case RISCV::PseudoVMV_S_X_MF8:
470+
case RISCV::PseudoVFMV_F16_S_M1:
471+
case RISCV::PseudoVFMV_F16_S_M2:
472+
case RISCV::PseudoVFMV_F16_S_M4:
473+
case RISCV::PseudoVFMV_F16_S_M8:
474+
case RISCV::PseudoVFMV_F16_S_MF2:
475+
case RISCV::PseudoVFMV_F16_S_MF4:
476+
case RISCV::PseudoVFMV_F16_S_MF8:
477+
case RISCV::PseudoVFMV_F32_S_M1:
478+
case RISCV::PseudoVFMV_F32_S_M2:
479+
case RISCV::PseudoVFMV_F32_S_M4:
480+
case RISCV::PseudoVFMV_F32_S_M8:
481+
case RISCV::PseudoVFMV_F32_S_MF2:
482+
case RISCV::PseudoVFMV_F32_S_MF4:
483+
case RISCV::PseudoVFMV_F32_S_MF8:
484+
case RISCV::PseudoVFMV_F64_S_M1:
485+
case RISCV::PseudoVFMV_F64_S_M2:
486+
case RISCV::PseudoVFMV_F64_S_M4:
487+
case RISCV::PseudoVFMV_F64_S_M8:
488+
case RISCV::PseudoVFMV_F64_S_MF2:
489+
case RISCV::PseudoVFMV_F64_S_MF4:
490+
case RISCV::PseudoVFMV_F64_S_MF8:
491+
return true;
492+
}
493+
}
494+
417495
static VSETVLIInfo computeInfoForInstr(const MachineInstr &MI, uint64_t TSFlags,
418496
const MachineRegisterInfo *MRI) {
419497
VSETVLIInfo InstrInfo;
@@ -461,6 +539,7 @@ static VSETVLIInfo computeInfoForInstr(const MachineInstr &MI, uint64_t TSFlags,
461539
// If there are no explicit defs, this is a store instruction which can
462540
// ignore the tail and mask policies.
463541
bool StoreOp = MI.getNumExplicitDefs() == 0;
542+
bool ScalarMovOp = isScalarMoveInstr(MI);
464543

465544
if (RISCVII::hasVLOp(TSFlags)) {
466545
const MachineOperand &VLOp = MI.getOperand(NumOperands - 2);
@@ -477,7 +556,7 @@ static VSETVLIInfo computeInfoForInstr(const MachineInstr &MI, uint64_t TSFlags,
477556
} else
478557
InstrInfo.setAVLReg(RISCV::NoRegister);
479558
InstrInfo.setVTYPE(VLMul, SEW, /*TailAgnostic*/ TailAgnostic,
480-
/*MaskAgnostic*/ false, MaskRegOp, StoreOp);
559+
/*MaskAgnostic*/ false, MaskRegOp, StoreOp, ScalarMovOp);
481560

482561
return InstrInfo;
483562
}
@@ -1000,6 +1079,13 @@ void RISCVInsertVSETVLI::emitVSETVLIs(MachineBasicBlock &MBB) {
10001079
PrevVSETVLIMI->getOperand(2).setImm(NewInfo.encodeVTYPE());
10011080
NeedInsertVSETVLI = false;
10021081
}
1082+
if (isScalarMoveInstr(MI) &&
1083+
((CurInfo.hasNonZeroAVL() && NewInfo.hasNonZeroAVL()) ||
1084+
(CurInfo.hasZeroAVL() && NewInfo.hasZeroAVL())) &&
1085+
NewInfo.hasSameVLMAX(CurInfo)) {
1086+
PrevVSETVLIMI->getOperand(2).setImm(NewInfo.encodeVTYPE());
1087+
NeedInsertVSETVLI = false;
1088+
}
10031089
}
10041090
if (NeedInsertVSETVLI)
10051091
insertVSETVLI(MBB, MI, NewInfo, CurInfo);

llvm/test/CodeGen/RISCV/rvv/common-shuffle-patterns.ll

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -27,12 +27,10 @@ define dso_local <16 x i16> @interleave(<8 x i16> %v0, <8 x i16> %v1) {
2727
; CHECK-NEXT: vsetivli zero, 16, e16, m2, tu, mu
2828
; CHECK-NEXT: vslideup.vi v12, v8, 8
2929
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
30-
; CHECK-NEXT: vrgather.vv v8, v20, v16
3130
; CHECK-NEXT: lui a0, 11
3231
; CHECK-NEXT: addiw a0, a0, -1366
33-
; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu
3432
; CHECK-NEXT: vmv.s.x v0, a0
35-
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
33+
; CHECK-NEXT: vrgather.vv v8, v20, v16
3634
; CHECK-NEXT: vrgather.vv v8, v12, v18, v0.t
3735
; CHECK-NEXT: ret
3836
entry:

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -513,13 +513,12 @@ define void @buildvec_seq_v9i8(<9 x i8>* %x) {
513513
; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
514514
; RV32-NEXT: vmv.s.x v0, a1
515515
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
516-
; RV32-NEXT: vmv.v.i v8, 2
517-
; RV32-NEXT: vmerge.vim v8, v8, 1, v0
516+
; RV32-NEXT: vmv.v.i v9, 2
518517
; RV32-NEXT: li a1, 36
519-
; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
520-
; RV32-NEXT: vmv.s.x v0, a1
521-
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
522-
; RV32-NEXT: vmerge.vim v8, v8, 3, v0
518+
; RV32-NEXT: vmv.s.x v8, a1
519+
; RV32-NEXT: vmerge.vim v9, v9, 1, v0
520+
; RV32-NEXT: vmv1r.v v0, v8
521+
; RV32-NEXT: vmerge.vim v8, v9, 3, v0
523522
; RV32-NEXT: vse8.v v8, (a0)
524523
; RV32-NEXT: ret
525524
;

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll

Lines changed: 5 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -319,9 +319,7 @@ define <4 x i8> @interleave_shuffles(<4 x i8> %x) {
319319
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
320320
; CHECK-NEXT: vrgather.vi v9, v8, 1
321321
; CHECK-NEXT: li a1, 10
322-
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
323322
; CHECK-NEXT: vmv.s.x v0, a1
324-
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
325323
; CHECK-NEXT: vid.v v8
326324
; CHECK-NEXT: vsrl.vi v10, v8, 1
327325
; CHECK-NEXT: vmv.v.x v8, a0
@@ -401,11 +399,9 @@ define <8 x i8> @splat_ve2_we0_ins_i0ve4(<8 x i8> %v, <8 x i8> %w) {
401399
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu
402400
; CHECK-NEXT: vmv.s.x v11, a0
403401
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
404-
; CHECK-NEXT: vrgather.vv v10, v8, v11
405402
; CHECK-NEXT: li a0, 66
406-
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
407403
; CHECK-NEXT: vmv.s.x v0, a0
408-
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
404+
; CHECK-NEXT: vrgather.vv v10, v8, v11
409405
; CHECK-NEXT: vrgather.vi v10, v9, 0, v0.t
410406
; CHECK-NEXT: vmv1r.v v8, v10
411407
; CHECK-NEXT: ret
@@ -439,11 +435,9 @@ define <8 x i8> @splat_ve2_we0_ins_i2ve4(<8 x i8> %v, <8 x i8> %w) {
439435
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
440436
; RV32-NEXT: vmv.v.x v11, a0
441437
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
442-
; RV32-NEXT: vrgather.vv v10, v8, v11
443438
; RV32-NEXT: li a0, 66
444-
; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
445439
; RV32-NEXT: vmv.s.x v0, a0
446-
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
440+
; RV32-NEXT: vrgather.vv v10, v8, v11
447441
; RV32-NEXT: vrgather.vi v10, v9, 0, v0.t
448442
; RV32-NEXT: vmv1r.v v8, v10
449443
; RV32-NEXT: ret
@@ -455,11 +449,9 @@ define <8 x i8> @splat_ve2_we0_ins_i2ve4(<8 x i8> %v, <8 x i8> %w) {
455449
; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
456450
; RV64-NEXT: vmv.v.x v11, a0
457451
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
458-
; RV64-NEXT: vrgather.vv v10, v8, v11
459452
; RV64-NEXT: li a0, 66
460-
; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
461453
; RV64-NEXT: vmv.s.x v0, a0
462-
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
454+
; RV64-NEXT: vrgather.vv v10, v8, v11
463455
; RV64-NEXT: vrgather.vi v10, v9, 0, v0.t
464456
; RV64-NEXT: vmv1r.v v8, v10
465457
; RV64-NEXT: ret
@@ -502,11 +494,9 @@ define <8 x i8> @splat_ve2_we0_ins_i2ve4_i5we6(<8 x i8> %v, <8 x i8> %w) {
502494
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
503495
; RV32-NEXT: vmv.v.x v12, a0
504496
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
505-
; RV32-NEXT: vrgather.vv v10, v8, v12
506497
; RV32-NEXT: li a0, 98
507-
; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
508498
; RV32-NEXT: vmv.s.x v0, a0
509-
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
499+
; RV32-NEXT: vrgather.vv v10, v8, v12
510500
; RV32-NEXT: vrgather.vv v10, v9, v11, v0.t
511501
; RV32-NEXT: vmv1r.v v8, v10
512502
; RV32-NEXT: ret
@@ -524,11 +514,9 @@ define <8 x i8> @splat_ve2_we0_ins_i2ve4_i5we6(<8 x i8> %v, <8 x i8> %w) {
524514
; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
525515
; RV64-NEXT: vmv.v.x v12, a0
526516
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
527-
; RV64-NEXT: vrgather.vv v10, v8, v12
528517
; RV64-NEXT: li a0, 98
529-
; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
530518
; RV64-NEXT: vmv.s.x v0, a0
531-
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
519+
; RV64-NEXT: vrgather.vv v10, v8, v12
532520
; RV64-NEXT: vrgather.vv v10, v9, v11, v0.t
533521
; RV64-NEXT: vmv1r.v v8, v10
534522
; RV64-NEXT: ret

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll

Lines changed: 4 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -4114,22 +4114,16 @@ define void @mulhu_v16i16(<16 x i16>* %x) {
41144114
; LMULMAX2-RV32-NEXT: vle16.v v10, (a0)
41154115
; LMULMAX2-RV32-NEXT: lui a1, 2
41164116
; LMULMAX2-RV32-NEXT: addi a1, a1, 289
4117-
; LMULMAX2-RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, mu
41184117
; LMULMAX2-RV32-NEXT: vmv.s.x v0, a1
4119-
; LMULMAX2-RV32-NEXT: vsetivli zero, 16, e16, m2, ta, mu
4120-
; LMULMAX2-RV32-NEXT: vmv.v.i v8, 3
4121-
; LMULMAX2-RV32-NEXT: vmerge.vim v12, v8, 2, v0
4118+
; LMULMAX2-RV32-NEXT: vmv.v.i v12, 3
41224119
; LMULMAX2-RV32-NEXT: lui a1, 4
41234120
; LMULMAX2-RV32-NEXT: addi a1, a1, 64
4124-
; LMULMAX2-RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, mu
41254121
; LMULMAX2-RV32-NEXT: vmv.s.x v8, a1
4126-
; LMULMAX2-RV32-NEXT: vsetivli zero, 16, e16, m2, ta, mu
4122+
; LMULMAX2-RV32-NEXT: vmerge.vim v12, v12, 2, v0
41274123
; LMULMAX2-RV32-NEXT: vmv1r.v v0, v8
41284124
; LMULMAX2-RV32-NEXT: vmerge.vim v12, v12, 1, v0
41294125
; LMULMAX2-RV32-NEXT: li a1, 257
4130-
; LMULMAX2-RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, mu
41314126
; LMULMAX2-RV32-NEXT: vmv.s.x v0, a1
4132-
; LMULMAX2-RV32-NEXT: vsetivli zero, 16, e16, m2, ta, mu
41334127
; LMULMAX2-RV32-NEXT: vmv.v.i v14, 0
41344128
; LMULMAX2-RV32-NEXT: lui a1, %hi(.LCPI130_0)
41354129
; LMULMAX2-RV32-NEXT: addi a1, a1, %lo(.LCPI130_0)
@@ -4153,22 +4147,16 @@ define void @mulhu_v16i16(<16 x i16>* %x) {
41534147
; LMULMAX2-RV64-NEXT: vle16.v v10, (a0)
41544148
; LMULMAX2-RV64-NEXT: lui a1, 2
41554149
; LMULMAX2-RV64-NEXT: addiw a1, a1, 289
4156-
; LMULMAX2-RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, mu
41574150
; LMULMAX2-RV64-NEXT: vmv.s.x v0, a1
4158-
; LMULMAX2-RV64-NEXT: vsetivli zero, 16, e16, m2, ta, mu
4159-
; LMULMAX2-RV64-NEXT: vmv.v.i v8, 3
4160-
; LMULMAX2-RV64-NEXT: vmerge.vim v12, v8, 2, v0
4151+
; LMULMAX2-RV64-NEXT: vmv.v.i v12, 3
41614152
; LMULMAX2-RV64-NEXT: lui a1, 4
41624153
; LMULMAX2-RV64-NEXT: addiw a1, a1, 64
4163-
; LMULMAX2-RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, mu
41644154
; LMULMAX2-RV64-NEXT: vmv.s.x v8, a1
4165-
; LMULMAX2-RV64-NEXT: vsetivli zero, 16, e16, m2, ta, mu
4155+
; LMULMAX2-RV64-NEXT: vmerge.vim v12, v12, 2, v0
41664156
; LMULMAX2-RV64-NEXT: vmv1r.v v0, v8
41674157
; LMULMAX2-RV64-NEXT: vmerge.vim v12, v12, 1, v0
41684158
; LMULMAX2-RV64-NEXT: li a1, 257
4169-
; LMULMAX2-RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, mu
41704159
; LMULMAX2-RV64-NEXT: vmv.s.x v0, a1
4171-
; LMULMAX2-RV64-NEXT: vsetivli zero, 16, e16, m2, ta, mu
41724160
; LMULMAX2-RV64-NEXT: vmv.v.i v14, 0
41734161
; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI130_0)
41744162
; LMULMAX2-RV64-NEXT: addi a1, a1, %lo(.LCPI130_0)
@@ -4531,11 +4519,9 @@ define void @mulhs_v16i16(<16 x i16>* %x) {
45314519
; LMULMAX2-RV32-NEXT: vle16.v v8, (a0)
45324520
; LMULMAX2-RV32-NEXT: lui a1, 7
45334521
; LMULMAX2-RV32-NEXT: addi a1, a1, -1687
4534-
; LMULMAX2-RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, mu
45354522
; LMULMAX2-RV32-NEXT: vmv.s.x v0, a1
45364523
; LMULMAX2-RV32-NEXT: lui a1, 5
45374524
; LMULMAX2-RV32-NEXT: addi a1, a1, -1755
4538-
; LMULMAX2-RV32-NEXT: vsetivli zero, 16, e16, m2, ta, mu
45394525
; LMULMAX2-RV32-NEXT: vmv.v.x v10, a1
45404526
; LMULMAX2-RV32-NEXT: lui a1, 1048571
45414527
; LMULMAX2-RV32-NEXT: addi a1, a1, 1755
@@ -4553,11 +4539,9 @@ define void @mulhs_v16i16(<16 x i16>* %x) {
45534539
; LMULMAX2-RV64-NEXT: vle16.v v8, (a0)
45544540
; LMULMAX2-RV64-NEXT: lui a1, 7
45554541
; LMULMAX2-RV64-NEXT: addiw a1, a1, -1687
4556-
; LMULMAX2-RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, mu
45574542
; LMULMAX2-RV64-NEXT: vmv.s.x v0, a1
45584543
; LMULMAX2-RV64-NEXT: lui a1, 5
45594544
; LMULMAX2-RV64-NEXT: addiw a1, a1, -1755
4560-
; LMULMAX2-RV64-NEXT: vsetivli zero, 16, e16, m2, ta, mu
45614545
; LMULMAX2-RV64-NEXT: vmv.v.x v10, a1
45624546
; LMULMAX2-RV64-NEXT: lui a1, 1048571
45634547
; LMULMAX2-RV64-NEXT: addiw a1, a1, 1755

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -256,9 +256,8 @@ define <2 x i64> @mgather_v2i64_align4(<2 x i64*> %ptrs, <2 x i1> %m, <2 x i64>
256256
; RV64-NEXT: lwu a0, 0(a0)
257257
; RV64-NEXT: slli a1, a1, 32
258258
; RV64-NEXT: or a0, a1, a0
259-
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
260259
; RV64-NEXT: vmv.s.x v8, a0
261-
; RV64-NEXT: vsetvli zero, zero, e64, m1, tu, mu
260+
; RV64-NEXT: vsetivli zero, 2, e64, m1, tu, mu
262261
; RV64-NEXT: vslideup.vi v9, v8, 1
263262
; RV64-NEXT: .LBB5_4: # %else2
264263
; RV64-NEXT: vmv1r.v v8, v9

llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -147,8 +147,7 @@ for.body: ; preds = %entry, %for.body
147147
define <vscale x 1 x i64> @test7(<vscale x 1 x i64> %a, i64 %b, <vscale x 1 x i1> %mask) nounwind {
148148
; CHECK-LABEL: test7:
149149
; CHECK: # %bb.0: # %entry
150-
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
151-
; CHECK-NEXT: vsetivli zero, 1, e64, m1, tu, mu
150+
; CHECK-NEXT: vsetvli a1, zero, e64, m1, tu, mu
152151
; CHECK-NEXT: vmv.s.x v8, a0
153152
; CHECK-NEXT: ret
154153
entry:
@@ -163,8 +162,7 @@ entry:
163162
define <vscale x 1 x i64> @test8(<vscale x 1 x i64> %a, i64 %b, <vscale x 1 x i1> %mask) nounwind {
164163
; CHECK-LABEL: test8:
165164
; CHECK: # %bb.0: # %entry
166-
; CHECK-NEXT: vsetivli a1, 6, e64, m1, ta, mu
167-
; CHECK-NEXT: vsetivli zero, 2, e64, m1, tu, mu
165+
; CHECK-NEXT: vsetivli a1, 6, e64, m1, tu, mu
168166
; CHECK-NEXT: vmv.s.x v8, a0
169167
; CHECK-NEXT: ret
170168
entry:
@@ -178,7 +176,6 @@ define <vscale x 1 x i64> @test9(<vscale x 1 x i64> %a, i64 %b, <vscale x 1 x i1
178176
; CHECK: # %bb.0: # %entry
179177
; CHECK-NEXT: vsetivli zero, 9, e64, m1, tu, mu
180178
; CHECK-NEXT: vadd.vv v8, v8, v8, v0.t
181-
; CHECK-NEXT: vsetivli zero, 2, e64, m1, tu, mu
182179
; CHECK-NEXT: vmv.s.x v8, a0
183180
; CHECK-NEXT: ret
184181
entry:

0 commit comments

Comments (0)