
[RISCV] Remove riscv.segN.load/store in favor of their mask variants #137045


Merged 3 commits on May 8, 2025
20 changes: 4 additions & 16 deletions llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -1704,14 +1704,10 @@ let TargetPrefix = "riscv" in {
}

// Segment loads/stores for fixed vectors.
// Note: we only have the masked variants because RISCVVectorPeephole
// would lower any instruction with an all-ones mask into the unmasked
// version anyway.
foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
// Input: (pointer, vl)
def int_riscv_seg # nf # _load
: DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty],
!listsplat(LLVMMatchType<0>,
!add(nf, -1))),
[llvm_anyptr_ty, llvm_anyint_ty],
[NoCapture<ArgIndex<0>>, IntrReadMem]>;
// Input: (pointer, mask, vl)
def int_riscv_seg # nf # _load_mask
: DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty],
@@ -1721,15 +1717,7 @@ let TargetPrefix = "riscv" in {
llvm_anyint_ty],
[NoCapture<ArgIndex<0>>, IntrReadMem]>;

// Input: (<stored values>, pointer, vl)
def int_riscv_seg # nf # _store
: DefaultAttrsIntrinsic<[],
!listconcat([llvm_anyvector_ty],
!listsplat(LLVMMatchType<0>,
!add(nf, -1)),
[llvm_anyptr_ty, llvm_anyint_ty]),
[NoCapture<ArgIndex<nf>>, IntrWriteMem]>;
// Input: (<stored values>, pointer, mask, vl)
// Input: (<stored values>..., pointer, mask, vl)
def int_riscv_seg # nf # _store_mask
: DefaultAttrsIntrinsic<[],
!listconcat([llvm_anyvector_ty],
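With the unmasked variants removed, an unmasked fixed-vector segment access is now expressed by passing an all-ones mask, which RISCVVectorPeephole folds back into the unmasked instruction. A minimal IR sketch of the remaining intrinsic (value names are illustrative; the mangling matches the updated tests below):

  %rv = call { <8 x i8>, <8 x i8> } @llvm.riscv.seg2.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8)
  %seg0 = extractvalue { <8 x i8>, <8 x i8> } %rv, 0
  %seg1 = extractvalue { <8 x i8>, <8 x i8> } %rv, 1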
140 changes: 36 additions & 104 deletions llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1745,13 +1745,6 @@ bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
MachineMemOperand::MOVolatile;
return true;
case Intrinsic::riscv_seg2_load:
case Intrinsic::riscv_seg3_load:
case Intrinsic::riscv_seg4_load:
case Intrinsic::riscv_seg5_load:
case Intrinsic::riscv_seg6_load:
case Intrinsic::riscv_seg7_load:
case Intrinsic::riscv_seg8_load:
case Intrinsic::riscv_seg2_load_mask:
case Intrinsic::riscv_seg3_load_mask:
case Intrinsic::riscv_seg4_load_mask:
@@ -1761,17 +1754,6 @@ bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
case Intrinsic::riscv_seg8_load_mask:
return SetRVVLoadStoreInfo(/*PtrOp*/ 0, /*IsStore*/ false,
/*IsUnitStrided*/ false, /*UsePtrVal*/ true);
case Intrinsic::riscv_seg2_store:
case Intrinsic::riscv_seg3_store:
case Intrinsic::riscv_seg4_store:
case Intrinsic::riscv_seg5_store:
case Intrinsic::riscv_seg6_store:
case Intrinsic::riscv_seg7_store:
case Intrinsic::riscv_seg8_store:
// Operands are (vec, ..., vec, ptr, vl)
return SetRVVLoadStoreInfo(/*PtrOp*/ I.arg_size() - 2,
/*IsStore*/ true,
/*IsUnitStrided*/ false, /*UsePtrVal*/ true);
case Intrinsic::riscv_seg2_store_mask:
case Intrinsic::riscv_seg3_store_mask:
case Intrinsic::riscv_seg4_store_mask:
@@ -10591,13 +10573,6 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
switch (IntNo) {
default:
break;
case Intrinsic::riscv_seg2_load:
case Intrinsic::riscv_seg3_load:
case Intrinsic::riscv_seg4_load:
case Intrinsic::riscv_seg5_load:
case Intrinsic::riscv_seg6_load:
case Intrinsic::riscv_seg7_load:
case Intrinsic::riscv_seg8_load:
case Intrinsic::riscv_seg2_load_mask:
case Intrinsic::riscv_seg3_load_mask:
case Intrinsic::riscv_seg4_load_mask:
@@ -10620,18 +10595,13 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
ContainerVT.getScalarSizeInBits();
EVT VecTupTy = MVT::getRISCVVectorTupleVT(Sz, NF);

// Masked: (pointer, mask, vl)
// Non-masked: (pointer, vl)
bool IsMasked = Op.getNumOperands() > 4;
// Operands: (chain, int_id, pointer, mask, vl)
SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
SDValue Mask =
IsMasked ? Op.getOperand(3) : getAllOnesMask(ContainerVT, VL, DL, DAG);
SDValue Mask = Op.getOperand(3);
MVT MaskVT = Mask.getSimpleValueType();
if (MaskVT.isFixedLengthVector()) {
[Review comment from the PR author on this hunk: remove the check here, as pointed out by @lukel97 in another PR.]
MVT MaskContainerVT =
::getContainerForFixedLengthVector(DAG, MaskVT, Subtarget);
Mask = convertToScalableVector(MaskContainerVT, Mask, DAG, Subtarget);
}
MVT MaskContainerVT =
::getContainerForFixedLengthVector(DAG, MaskVT, Subtarget);
Mask = convertToScalableVector(MaskContainerVT, Mask, DAG, Subtarget);

SDValue IntID = DAG.getTargetConstant(VlsegInts[NF - 2], DL, XLenVT);
auto *Load = cast<MemIntrinsicSDNode>(Op);
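The mask is now always operand 3 of the intrinsic node (after the chain and intrinsic ID), and a fixed-length mask is unconditionally converted to its scalable container type. A hypothetical partially masked segment load at the IR level would look like (names illustrative):

  %rv = call { <8 x i8>, <8 x i8> } @llvm.riscv.seg2.load.mask.v8i8.i64(ptr %ptr, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false>, i64 8)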
@@ -10699,13 +10669,6 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
switch (IntNo) {
default:
break;
case Intrinsic::riscv_seg2_store:
case Intrinsic::riscv_seg3_store:
case Intrinsic::riscv_seg4_store:
case Intrinsic::riscv_seg5_store:
case Intrinsic::riscv_seg6_store:
case Intrinsic::riscv_seg7_store:
case Intrinsic::riscv_seg8_store:
case Intrinsic::riscv_seg2_store_mask:
case Intrinsic::riscv_seg3_store_mask:
case Intrinsic::riscv_seg4_store_mask:
@@ -10720,24 +10683,8 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
Intrinsic::riscv_vsseg6_mask, Intrinsic::riscv_vsseg7_mask,
Intrinsic::riscv_vsseg8_mask};

bool IsMasked = false;
switch (IntNo) {
case Intrinsic::riscv_seg2_store_mask:
case Intrinsic::riscv_seg3_store_mask:
case Intrinsic::riscv_seg4_store_mask:
case Intrinsic::riscv_seg5_store_mask:
case Intrinsic::riscv_seg6_store_mask:
case Intrinsic::riscv_seg7_store_mask:
case Intrinsic::riscv_seg8_store_mask:
IsMasked = true;
break;
default:
break;
}

// Non-masked: (chain, int_id, vec*, ptr, vl)
// Masked: (chain, int_id, vec*, ptr, mask, vl)
unsigned NF = Op->getNumOperands() - (IsMasked ? 5 : 4);
// Operands: (chain, int_id, vec*, ptr, mask, vl)
unsigned NF = Op->getNumOperands() - 5;
assert(NF >= 2 && NF <= 8 && "Unexpected seg number");
MVT XLenVT = Subtarget.getXLenVT();
MVT VT = Op->getOperand(2).getSimpleValueType();
Expand All @@ -10747,14 +10694,11 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
EVT VecTupTy = MVT::getRISCVVectorTupleVT(Sz, NF);

SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
SDValue Mask = IsMasked ? Op.getOperand(Op.getNumOperands() - 2)
: getAllOnesMask(ContainerVT, VL, DL, DAG);
SDValue Mask = Op.getOperand(Op.getNumOperands() - 2);
MVT MaskVT = Mask.getSimpleValueType();
if (MaskVT.isFixedLengthVector()) {
[Review comment from the PR author on this hunk: similarly, remove the check here.]
MVT MaskContainerVT =
::getContainerForFixedLengthVector(DAG, MaskVT, Subtarget);
Mask = convertToScalableVector(MaskContainerVT, Mask, DAG, Subtarget);
}
MVT MaskContainerVT =
::getContainerForFixedLengthVector(DAG, MaskVT, Subtarget);
Mask = convertToScalableVector(MaskContainerVT, Mask, DAG, Subtarget);

SDValue IntID = DAG.getTargetConstant(VssegInts[NF - 2], DL, XLenVT);
SDValue Ptr = Op->getOperand(NF + 2);
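To illustrate the operand layout documented above: a factor-3 masked segment store carries its three data vectors first, then pointer, mask, and VL (a hypothetical call; the store mangling is assumed to mirror the load tests below):

  call void @llvm.riscv.seg3.store.mask.v8i8.i64(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, ptr %ptr, <8 x i1> splat (i1 true), i64 8)

As a SelectionDAG node this has a chain, the intrinsic ID, and six arguments, i.e. 8 operands, so NF = 8 - 5 = 3.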
@@ -23823,10 +23767,10 @@ bool RISCVTargetLowering::isLegalStridedLoadStore(EVT DataType,
}

static const Intrinsic::ID FixedVlsegIntrIds[] = {
Intrinsic::riscv_seg2_load, Intrinsic::riscv_seg3_load,
Intrinsic::riscv_seg4_load, Intrinsic::riscv_seg5_load,
Intrinsic::riscv_seg6_load, Intrinsic::riscv_seg7_load,
Intrinsic::riscv_seg8_load};
Intrinsic::riscv_seg2_load_mask, Intrinsic::riscv_seg3_load_mask,
Intrinsic::riscv_seg4_load_mask, Intrinsic::riscv_seg5_load_mask,
Intrinsic::riscv_seg6_load_mask, Intrinsic::riscv_seg7_load_mask,
Intrinsic::riscv_seg8_load_mask};

/// Lower an interleaved load into a vlsegN intrinsic.
///
@@ -23877,10 +23821,10 @@ bool RISCVTargetLowering::lowerInterleavedLoad(
};

Value *VL = ConstantInt::get(XLenTy, VTy->getNumElements());

CallInst *VlsegN = Builder.CreateIntrinsic(
FixedVlsegIntrIds[Factor - 2], {VTy, LI->getPointerOperandType(), XLenTy},
{LI->getPointerOperand(), VL});
Value *Mask = Builder.getAllOnesMask(VTy->getElementCount());
CallInst *VlsegN =
Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2], {VTy, XLenTy},
{LI->getPointerOperand(), Mask, VL});

for (unsigned i = 0; i < Shuffles.size(); i++) {
Value *SubVec = Builder.CreateExtractValue(VlsegN, Indices[i]);
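For context, lowerInterleavedLoad rewrites a wide load whose results feed de-interleaving shuffles into a single segment load, which now always takes an explicit all-ones mask. A schematic factor-2 example (two separate snippets, not one valid function; names illustrative):

  ; before
  %wide = load <16 x i8>, ptr %ptr
  %even = shufflevector <16 x i8> %wide, <16 x i8> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
  %odd = shufflevector <16 x i8> %wide, <16 x i8> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>

  ; after
  %rv = call { <8 x i8>, <8 x i8> } @llvm.riscv.seg2.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8)
  %even.2 = extractvalue { <8 x i8>, <8 x i8> } %rv, 0
  %odd.2 = extractvalue { <8 x i8>, <8 x i8> } %rv, 1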
@@ -23891,10 +23835,10 @@
}

static const Intrinsic::ID FixedVssegIntrIds[] = {
Intrinsic::riscv_seg2_store, Intrinsic::riscv_seg3_store,
Intrinsic::riscv_seg4_store, Intrinsic::riscv_seg5_store,
Intrinsic::riscv_seg6_store, Intrinsic::riscv_seg7_store,
Intrinsic::riscv_seg8_store};
Intrinsic::riscv_seg2_store_mask, Intrinsic::riscv_seg3_store_mask,
Intrinsic::riscv_seg4_store_mask, Intrinsic::riscv_seg5_store_mask,
Intrinsic::riscv_seg6_store_mask, Intrinsic::riscv_seg7_store_mask,
Intrinsic::riscv_seg8_store_mask};

/// Lower an interleaved store into a vssegN intrinsic.
///
@@ -23954,8 +23898,7 @@ bool RISCVTargetLowering::lowerInterleavedStore(StoreInst *SI,
}

Function *VssegNFunc = Intrinsic::getOrInsertDeclaration(
SI->getModule(), FixedVssegIntrIds[Factor - 2],
{VTy, SI->getPointerOperandType(), XLenTy});
SI->getModule(), FixedVssegIntrIds[Factor - 2], {VTy, XLenTy});

SmallVector<Value *, 10> Ops;
SmallVector<int, 16> NewShuffleMask;
@@ -23975,7 +23918,8 @@ bool RISCVTargetLowering::lowerInterleavedStore(StoreInst *SI,
// potentially under larger LMULs) because we checked that the fixed vector
// type fits in isLegalInterleavedAccessType
Value *VL = ConstantInt::get(XLenTy, VTy->getNumElements());
Ops.append({SI->getPointerOperand(), VL});
Value *StoreMask = Builder.getAllOnesMask(VTy->getElementCount());
Ops.append({SI->getPointerOperand(), StoreMask, VL});

Builder.CreateCall(VssegNFunc, Ops);
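Correspondingly, lowerInterleavedStore now emits the masked segment-store intrinsic with an all-ones mask appended before VL. A schematic factor-2 sketch (hypothetical; names illustrative, store mangling assumed to mirror the load tests):

  ; before
  %ileave = shufflevector <8 x i8> %a, <8 x i8> %b, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
  store <16 x i8> %ileave, ptr %ptr

  ; after
  call void @llvm.riscv.seg2.store.mask.v8i8.i64(<8 x i8> %a, <8 x i8> %b, ptr %ptr, <8 x i1> splat (i1 true), i64 8)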

@@ -24004,10 +23948,10 @@ bool RISCVTargetLowering::lowerDeinterleaveIntrinsicToLoad(

if (auto *FVTy = dyn_cast<FixedVectorType>(ResVTy)) {
Value *VL = ConstantInt::get(XLenTy, FVTy->getNumElements());
Value *Mask = Builder.getAllOnesMask(FVTy->getElementCount());
Return =
Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2],
{ResVTy, LI->getPointerOperandType(), XLenTy},
{LI->getPointerOperand(), VL});
Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2], {ResVTy, XLenTy},
{LI->getPointerOperand(), Mask, VL});
} else {
static const Intrinsic::ID IntrIds[] = {
Intrinsic::riscv_vlseg2, Intrinsic::riscv_vlseg3,
@@ -24071,12 +24015,12 @@ bool RISCVTargetLowering::lowerInterleaveIntrinsicToStore(

if (auto *FVTy = dyn_cast<FixedVectorType>(InVTy)) {
Function *VssegNFunc = Intrinsic::getOrInsertDeclaration(
SI->getModule(), FixedVssegIntrIds[Factor - 2],
{InVTy, SI->getPointerOperandType(), XLenTy});
SI->getModule(), FixedVssegIntrIds[Factor - 2], {InVTy, XLenTy});

SmallVector<Value *, 10> Ops(InterleaveValues);
Value *VL = ConstantInt::get(XLenTy, FVTy->getNumElements());
Ops.append({SI->getPointerOperand(), VL});
Value *Mask = Builder.getAllOnesMask(FVTy->getElementCount());
Ops.append({SI->getPointerOperand(), Mask, VL});

Builder.CreateCall(VssegNFunc, Ops);
} else {
@@ -24198,15 +24142,9 @@ bool RISCVTargetLowering::lowerInterleavedVPLoad(

Value *Return = nullptr;
if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
static const Intrinsic::ID FixedMaskedVlsegIntrIds[] = {
Intrinsic::riscv_seg2_load_mask, Intrinsic::riscv_seg3_load_mask,
Intrinsic::riscv_seg4_load_mask, Intrinsic::riscv_seg5_load_mask,
Intrinsic::riscv_seg6_load_mask, Intrinsic::riscv_seg7_load_mask,
Intrinsic::riscv_seg8_load_mask};

Return = Builder.CreateIntrinsic(FixedMaskedVlsegIntrIds[Factor - 2],
{FVTy, XLenTy},
{Load->getArgOperand(0), Mask, EVL});
Return =
Builder.CreateIntrinsic(FixedVlsegIntrIds[Factor - 2], {FVTy, XLenTy},
{Load->getArgOperand(0), Mask, EVL});
} else {
static const Intrinsic::ID IntrMaskIds[] = {
Intrinsic::riscv_vlseg2_mask, Intrinsic::riscv_vlseg3_mask,
@@ -24318,15 +24256,9 @@ bool RISCVTargetLowering::lowerInterleavedVPStore(
XLenTy);

if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
static const Intrinsic::ID FixedMaskedVssegIntrIds[] = {
Intrinsic::riscv_seg2_store_mask, Intrinsic::riscv_seg3_store_mask,
Intrinsic::riscv_seg4_store_mask, Intrinsic::riscv_seg5_store_mask,
Intrinsic::riscv_seg6_store_mask, Intrinsic::riscv_seg7_store_mask,
Intrinsic::riscv_seg8_store_mask};

SmallVector<Value *, 8> Operands(InterleaveOperands);
Operands.append({Store->getArgOperand(1), Mask, EVL});
Builder.CreateIntrinsic(FixedMaskedVssegIntrIds[Factor - 2], {FVTy, XLenTy},
Builder.CreateIntrinsic(FixedVssegIntrIds[Factor - 2], {FVTy, XLenTy},
Operands);
return true;
}
21 changes: 7 additions & 14 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-segN-load.ll
@@ -7,7 +7,7 @@ define <8 x i8> @load_factor2(ptr %ptr) {
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vlseg2e8.v v7, (a0)
; CHECK-NEXT: ret
%1 = call { <8 x i8>, <8 x i8> } @llvm.riscv.seg2.load.v8i8.p0.i64(ptr %ptr, i64 8)
%1 = call { <8 x i8>, <8 x i8> } @llvm.riscv.seg2.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8)
%2 = extractvalue { <8 x i8>, <8 x i8> } %1, 0
%3 = extractvalue { <8 x i8>, <8 x i8> } %1, 1
ret <8 x i8> %3
@@ -19,7 +19,7 @@ define <8 x i8> @load_factor3(ptr %ptr) {
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vlseg3e8.v v6, (a0)
; CHECK-NEXT: ret
%1 = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg3.load.v8i8.p0.i64(ptr %ptr, i64 8)
%1 = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg3.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8)
%2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
%3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
%4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
@@ -32,7 +32,7 @@ define <8 x i8> @load_factor4(ptr %ptr) {
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vlseg4e8.v v5, (a0)
; CHECK-NEXT: ret
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg4.load.v8i8.p0.i64(ptr %ptr, i64 8)
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg4.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8)
%2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
%3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
%4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
@@ -46,7 +46,7 @@ define <8 x i8> @load_factor5(ptr %ptr) {
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vlseg5e8.v v4, (a0)
; CHECK-NEXT: ret
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg5.load.v8i8.p0.i64(ptr %ptr, i64 8)
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg5.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8)
%2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
%3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
%4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
@@ -61,7 +61,7 @@ define <8 x i8> @load_factor6(ptr %ptr) {
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vlseg6e8.v v3, (a0)
; CHECK-NEXT: ret
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg6.load.v8i8.p0.i64(ptr %ptr, i64 8)
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg6.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8)
%2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
%3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
%4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
@@ -77,7 +77,7 @@ define <8 x i8> @load_factor7(ptr %ptr) {
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vlseg7e8.v v2, (a0)
; CHECK-NEXT: ret
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg7.load.v8i8.p0.i64(ptr %ptr, i64 8)
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg7.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8)
%2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
%3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
%4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
@@ -94,7 +94,7 @@ define <8 x i8> @load_factor8(ptr %ptr) {
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vlseg8e8.v v1, (a0)
; CHECK-NEXT: ret
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg8.load.v8i8.p0.i64(ptr %ptr, i64 8)
%1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg8.load.mask.v8i8.i64(ptr %ptr, <8 x i1> splat (i1 true), i64 8)
%2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
%3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
%4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
@@ -105,10 +105,3 @@ define <8 x i8> @load_factor8(ptr %ptr) {
%9 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 7
ret <8 x i8> %9
}
declare { <8 x i8>, <8 x i8> } @llvm.riscv.seg2.load.v8i8.p0.i64(ptr, i64)
declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg3.load.v8i8.p0.i64(ptr, i64)
declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg4.load.v8i8.p0.i64(ptr, i64)
declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg5.load.v8i8.p0.i64(ptr, i64)
declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg6.load.v8i8.p0.i64(ptr, i64)
declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg7.load.v8i8.p0.i64(ptr, i64)
declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg8.load.v8i8.p0.i64(ptr, i64)