[RISCV] Emit VP strided loads/stores in RISCVGatherScatterLowering #98111


Merged · 3 commits · Jul 24, 2024
14 changes: 0 additions & 14 deletions llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -1710,20 +1710,6 @@ let TargetPrefix = "riscv" in {
     defm vsuxseg # nf : RISCVISegStore<nf>;
   }
 
-  // Strided loads/stores for fixed vectors.
-  def int_riscv_masked_strided_load
-      : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
-                              [LLVMMatchType<0>, llvm_anyptr_ty,
-                               llvm_anyint_ty,
-                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
-                              [NoCapture<ArgIndex<1>>, IntrReadMem]>;
-  def int_riscv_masked_strided_store
-      : DefaultAttrsIntrinsic<[],
-                              [llvm_anyvector_ty, llvm_anyptr_ty,
-                               llvm_anyint_ty,
-                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
-                              [NoCapture<ArgIndex<1>>, IntrWriteMem]>;
-
   // Segment loads/stores for fixed vectors.
   foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
     def int_riscv_seg # nf # _load
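For reference, here is a sketch of how the removed target-specific intrinsics compare to the generic VP intrinsics that replace them, written as IR declarations for an illustrative <4 x i32>/i64 instantiation (the mangled suffixes depend on the types at each call site):

    ; Removed RISC-V-specific form: passthru, pointer, byte stride, mask
    declare <4 x i32> @llvm.riscv.masked.strided.load.v4i32.p0.i64(<4 x i32>, ptr, i64, <4 x i1>)
    declare void @llvm.riscv.masked.strided.store.v4i32.p0.i64(<4 x i32>, ptr, i64, <4 x i1>)

    ; Generic VP replacement: pointer, byte stride, mask, EVL (no passthru operand)
    declare <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i64(ptr, i64, <4 x i1>, i32)
    declare void @llvm.experimental.vp.strided.store.v4i32.p0.i64(<4 x i32>, ptr, i64, <4 x i1>, i32)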
18 changes: 12 additions & 6 deletions llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp
@@ -515,17 +515,23 @@ bool RISCVGatherScatterLowering::tryCreateStridedLoadStore(IntrinsicInst *II,
 
   Builder.SetInsertPoint(II);
 
+  Value *EVL = Builder.CreateElementCount(
+      IntegerType::get(Ctx, 32), cast<VectorType>(DataType)->getElementCount());
+
   CallInst *Call;
-  if (II->getIntrinsicID() == Intrinsic::masked_gather)
+  if (II->getIntrinsicID() == Intrinsic::masked_gather) {
     Call = Builder.CreateIntrinsic(
-        Intrinsic::riscv_masked_strided_load,
+        Intrinsic::experimental_vp_strided_load,
         {DataType, BasePtr->getType(), Stride->getType()},
-        {II->getArgOperand(3), BasePtr, Stride, II->getArgOperand(2)});
-  else
+        {BasePtr, Stride, II->getArgOperand(2), EVL});
+    Call = Builder.CreateIntrinsic(
+        Intrinsic::vp_select, {DataType},
+        {II->getOperand(2), Call, II->getArgOperand(3), EVL});
+  } else
     Call = Builder.CreateIntrinsic(
-        Intrinsic::riscv_masked_strided_store,
+        Intrinsic::experimental_vp_strided_store,
         {DataType, BasePtr->getType(), Stride->getType()},
-        {II->getArgOperand(0), BasePtr, Stride, II->getArgOperand(3)});
+        {II->getArgOperand(0), BasePtr, Stride, II->getArgOperand(3), EVL});
 
   Call->takeName(II);
   II->replaceAllUsesWith(Call);
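In IR terms, the pass now rewrites a recognized strided masked.gather roughly as follows (an illustrative sketch with <4 x i32> elements; %base and %stride stand for the base pointer and byte stride the pass recovers from the address computation, and the EVL is the fixed vector's element count):

    ; Before: gather over a constant-stride address sequence
    %res = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> %mask, <4 x i32> %passthru)

    ; After: strided VP load plus a vp.select to reapply the passthru
    %load = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i64(ptr %base, i64 %stride, <4 x i1> %mask, i32 4)
    %res  = call <4 x i32> @llvm.vp.select.v4i32(<4 x i1> %mask, <4 x i32> %load, <4 x i32> %passthru, i32 4)

The extra vp.select is needed because, unlike the removed riscv.masked.strided.load, the VP load carries no passthru operand: masked-off lanes are poison, so the gather's passthru has to be merged back in explicitly. masked.scatter needs no such fixup and maps directly to experimental.vp.strided.store.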
159 changes: 0 additions & 159 deletions llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1622,12 +1622,6 @@ bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
MachineMemOperand::MOVolatile;
return true;
case Intrinsic::riscv_masked_strided_load:
return SetRVVLoadStoreInfo(/*PtrOp*/ 1, /*IsStore*/ false,
/*IsUnitStrided*/ false);
case Intrinsic::riscv_masked_strided_store:
return SetRVVLoadStoreInfo(/*PtrOp*/ 1, /*IsStore*/ true,
/*IsUnitStrided*/ false);
case Intrinsic::riscv_seg2_load:
case Intrinsic::riscv_seg3_load:
case Intrinsic::riscv_seg4_load:
@@ -9414,81 +9408,6 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
switch (IntNo) {
default:
break;
case Intrinsic::riscv_masked_strided_load: {
SDLoc DL(Op);
MVT XLenVT = Subtarget.getXLenVT();

// If the mask is known to be all ones, optimize to an unmasked intrinsic;
// the selection of the masked intrinsics doesn't do this for us.
SDValue Mask = Op.getOperand(5);
bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());

MVT VT = Op->getSimpleValueType(0);
MVT ContainerVT = VT;
if (VT.isFixedLengthVector())
ContainerVT = getContainerForFixedLengthVector(VT);

SDValue PassThru = Op.getOperand(2);
if (!IsUnmasked) {
MVT MaskVT = getMaskTypeFor(ContainerVT);
if (VT.isFixedLengthVector()) {
Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
}
}

auto *Load = cast<MemIntrinsicSDNode>(Op);
SDValue VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
SDValue Ptr = Op.getOperand(3);
SDValue Stride = Op.getOperand(4);
SDValue Result, Chain;

// TODO: We restrict this to unmasked loads currently in consideration of
// the complexity of handling all falses masks.
MVT ScalarVT = ContainerVT.getVectorElementType();
if (IsUnmasked && isNullConstant(Stride) && ContainerVT.isInteger()) {
SDValue ScalarLoad =
DAG.getExtLoad(ISD::EXTLOAD, DL, XLenVT, Load->getChain(), Ptr,
ScalarVT, Load->getMemOperand());
Chain = ScalarLoad.getValue(1);
Result = lowerScalarSplat(SDValue(), ScalarLoad, VL, ContainerVT, DL, DAG,
Subtarget);
} else if (IsUnmasked && isNullConstant(Stride) && isTypeLegal(ScalarVT)) {
SDValue ScalarLoad = DAG.getLoad(ScalarVT, DL, Load->getChain(), Ptr,
Load->getMemOperand());
Chain = ScalarLoad.getValue(1);
Result = DAG.getSplat(ContainerVT, DL, ScalarLoad);
} else {
SDValue IntID = DAG.getTargetConstant(
IsUnmasked ? Intrinsic::riscv_vlse : Intrinsic::riscv_vlse_mask, DL,
XLenVT);

SmallVector<SDValue, 8> Ops{Load->getChain(), IntID};
if (IsUnmasked)
Ops.push_back(DAG.getUNDEF(ContainerVT));
else
Ops.push_back(PassThru);
Ops.push_back(Ptr);
Ops.push_back(Stride);
if (!IsUnmasked)
Ops.push_back(Mask);
Ops.push_back(VL);
if (!IsUnmasked) {
SDValue Policy =
DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
Ops.push_back(Policy);
}

SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
Result =
DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
Load->getMemoryVT(), Load->getMemOperand());
Chain = Result.getValue(1);
}
if (VT.isFixedLengthVector())
Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
return DAG.getMergeValues({Result, Chain}, DL);
}
case Intrinsic::riscv_seg2_load:
case Intrinsic::riscv_seg3_load:
case Intrinsic::riscv_seg4_load:
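One thing the deleted custom lowering handled was a zero-stride fast path: with a stride of 0 every lane reads the same address, so the access was emitted as a single scalar load splatted across the vector. Conceptually, in IR (a sketch with illustrative types):

    %s    = load i32, ptr %base
    %head = insertelement <4 x i32> poison, i32 %s, i64 0
    %v    = shufflevector <4 x i32> %head, <4 x i32> poison, <4 x i32> zeroinitializer

That fast path was tied to the removed intrinsic; after this patch, any equivalent optimization has to happen in the generic experimental.vp.strided.load lowering instead.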
@@ -9568,47 +9487,6 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
switch (IntNo) {
default:
break;
case Intrinsic::riscv_masked_strided_store: {
SDLoc DL(Op);
MVT XLenVT = Subtarget.getXLenVT();

// If the mask is known to be all ones, optimize to an unmasked intrinsic;
// the selection of the masked intrinsics doesn't do this for us.
SDValue Mask = Op.getOperand(5);
bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());

SDValue Val = Op.getOperand(2);
MVT VT = Val.getSimpleValueType();
MVT ContainerVT = VT;
if (VT.isFixedLengthVector()) {
ContainerVT = getContainerForFixedLengthVector(VT);
Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
}
if (!IsUnmasked) {
MVT MaskVT = getMaskTypeFor(ContainerVT);
if (VT.isFixedLengthVector())
Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
}

SDValue VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;

SDValue IntID = DAG.getTargetConstant(
IsUnmasked ? Intrinsic::riscv_vsse : Intrinsic::riscv_vsse_mask, DL,
XLenVT);

auto *Store = cast<MemIntrinsicSDNode>(Op);
SmallVector<SDValue, 8> Ops{Store->getChain(), IntID};
Ops.push_back(Val);
Ops.push_back(Op.getOperand(3)); // Ptr
Ops.push_back(Op.getOperand(4)); // Stride
if (!IsUnmasked)
Ops.push_back(Mask);
Ops.push_back(VL);

return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, Store->getVTList(),
Ops, Store->getMemoryVT(),
Store->getMemOperand());
}
case Intrinsic::riscv_seg2_store:
case Intrinsic::riscv_seg3_store:
case Intrinsic::riscv_seg4_store:
@@ -17551,43 +17429,6 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
// By default we do not combine any intrinsic.
default:
return SDValue();
case Intrinsic::riscv_masked_strided_load: {
MVT VT = N->getSimpleValueType(0);
auto *Load = cast<MemIntrinsicSDNode>(N);
SDValue PassThru = N->getOperand(2);
SDValue Base = N->getOperand(3);
SDValue Stride = N->getOperand(4);
SDValue Mask = N->getOperand(5);

// If the stride is equal to the element size in bytes, we can use
// a masked.load.
const unsigned ElementSize = VT.getScalarStoreSize();
if (auto *StrideC = dyn_cast<ConstantSDNode>(Stride);
StrideC && StrideC->getZExtValue() == ElementSize)
return DAG.getMaskedLoad(VT, DL, Load->getChain(), Base,
DAG.getUNDEF(XLenVT), Mask, PassThru,
Load->getMemoryVT(), Load->getMemOperand(),
ISD::UNINDEXED, ISD::NON_EXTLOAD);
return SDValue();
}
case Intrinsic::riscv_masked_strided_store: {
auto *Store = cast<MemIntrinsicSDNode>(N);
SDValue Value = N->getOperand(2);
SDValue Base = N->getOperand(3);
SDValue Stride = N->getOperand(4);
SDValue Mask = N->getOperand(5);

// If the stride is equal to the element size in bytes, we can use
// a masked.store.
const unsigned ElementSize = Value.getValueType().getScalarStoreSize();
if (auto *StrideC = dyn_cast<ConstantSDNode>(Stride);
StrideC && StrideC->getZExtValue() == ElementSize)
return DAG.getMaskedStore(Store->getChain(), DL, Value, Base,
DAG.getUNDEF(XLenVT), Mask,
Value.getValueType(), Store->getMemOperand(),
ISD::UNINDEXED, false);
return SDValue();
}
case Intrinsic::riscv_vcpop:
case Intrinsic::riscv_vcpop_mask:
case Intrinsic::riscv_vfirst:
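The deleted DAG combines relied on the observation that a byte stride equal to the element size is just a contiguous access, so the strided form could be folded to an ordinary masked load/store. As a sketch with illustrative types (4-byte i32 elements, stride 4):

    ; the i32 4 operand below is the alignment argument of masked.load/masked.store
    %v = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %base, i32 4, <4 x i1> %mask, <4 x i32> %passthru)
    call void @llvm.masked.store.v4i32.p0(<4 x i32> %val, ptr %base, i32 4, <4 x i1> %mask)

These combines existed only for the riscv.masked.strided intrinsics, so they are removed along with them.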
16 changes: 0 additions & 16 deletions llvm/test/CodeGen/RISCV/pr89833.ll

This file was deleted.
