
[RISCV][NFC] Convert some predicates to TIIPredicate #129658

Status: Open. Wants to merge 3 commits into main (changes shown from 2 commits).
6 changes: 6 additions & 0 deletions llvm/lib/Target/RISCV/RISCV.td
@@ -36,6 +36,12 @@ include "RISCVCallingConv.td"
include "RISCVInstrInfo.td"
include "GISel/RISCVRegisterBanks.td"

//===----------------------------------------------------------------------===//
// Instruction predicates
//===----------------------------------------------------------------------===//

include "RISCVInstrPredicates.td"

//===----------------------------------------------------------------------===//
// RISC-V macro fusions.
//===----------------------------------------------------------------------===//
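The predicates themselves move into the new llvm/lib/Target/RISCV/RISCVInstrPredicates.td included above. As a rough, hypothetical sketch (the def names and exact contents of the new file are assumptions here; TIIPredicate, MCReturnStatement and CheckOpcode are the generic constructs from llvm/Target/TargetInstrPredicate.td), the simplest kind of predicate, a plain opcode check such as the old isVectorConfigInstr() from RISCVInsertVSETVLI.cpp, could be written along these lines:

// Sketch only: same three opcodes as the removed static helper.
def IsVectorConfigInstr
    : TIIPredicate<"isVectorConfigInstr",
                   MCReturnStatement<
                       CheckOpcode<[PseudoVSETVLI,
                                    PseudoVSETVLIX0,
                                    PseudoVSETIVLI]>>>;

TableGen then emits a matching RISCVInstrInfo::isVectorConfigInstr() helper into RISCVGenInstrInfo.inc, which the C++ changes below call in place of the file-local static functions.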
21 changes: 2 additions & 19 deletions llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -20438,23 +20438,6 @@ static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
return BB;
}

static bool isSelectPseudo(MachineInstr &MI) {
switch (MI.getOpcode()) {
default:
return false;
case RISCV::Select_GPR_Using_CC_GPR:
case RISCV::Select_GPR_Using_CC_Imm:
case RISCV::Select_FPR16_Using_CC_GPR:
case RISCV::Select_FPR16INX_Using_CC_GPR:
case RISCV::Select_FPR32_Using_CC_GPR:
case RISCV::Select_FPR32INX_Using_CC_GPR:
case RISCV::Select_FPR64_Using_CC_GPR:
case RISCV::Select_FPR64INX_Using_CC_GPR:
case RISCV::Select_FPR64IN32X_Using_CC_GPR:
return true;
}
}

static MachineBasicBlock *emitQuietFCMP(MachineInstr &MI, MachineBasicBlock *BB,
unsigned RelOpcode, unsigned EqOpcode,
const RISCVSubtarget &Subtarget) {
@@ -20650,7 +20633,7 @@ static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
SequenceMBBI != E; ++SequenceMBBI) {
if (SequenceMBBI->isDebugInstr())
continue;
if (isSelectPseudo(*SequenceMBBI)) {
if (RISCVInstrInfo::isSelectPseudo(*SequenceMBBI)) {
if (SequenceMBBI->getOperand(1).getReg() != LHS ||
!SequenceMBBI->getOperand(2).isReg() ||
SequenceMBBI->getOperand(2).getReg() != RHS ||
@@ -20727,7 +20710,7 @@ static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
auto InsertionPoint = TailMBB->begin();
while (SelectMBBI != SelectEnd) {
auto Next = std::next(SelectMBBI);
if (isSelectPseudo(*SelectMBBI)) {
if (RISCVInstrInfo::isSelectPseudo(*SelectMBBI)) {
// %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
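The Select_* pseudo check that used to live here as a static helper is now reachable as RISCVInstrInfo::isSelectPseudo(). A minimal sketch of how the corresponding TIIPredicate might be declared in RISCVInstrPredicates.td, assuming the same CheckOpcode-based style as above (the actual definition may differ):

// Sketch only: the opcode list mirrors the removed switch statement.
def IsSelectPseudo
    : TIIPredicate<"isSelectPseudo",
                   MCReturnStatement<
                       CheckOpcode<[Select_GPR_Using_CC_GPR,
                                    Select_GPR_Using_CC_Imm,
                                    Select_FPR16_Using_CC_GPR,
                                    Select_FPR16INX_Using_CC_GPR,
                                    Select_FPR32_Using_CC_GPR,
                                    Select_FPR32INX_Using_CC_GPR,
                                    Select_FPR64_Using_CC_GPR,
                                    Select_FPR64INX_Using_CC_GPR,
                                    Select_FPR64IN32X_Using_CC_GPR]>>>;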
118 changes: 23 additions & 95 deletions llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -69,74 +69,6 @@ static unsigned getSEWOpNum(const MachineInstr &MI) {
return RISCVII::getSEWOpNum(MI.getDesc());
}

static bool isVectorConfigInstr(const MachineInstr &MI) {
return MI.getOpcode() == RISCV::PseudoVSETVLI ||
MI.getOpcode() == RISCV::PseudoVSETVLIX0 ||
MI.getOpcode() == RISCV::PseudoVSETIVLI;
}

/// Return true if this is 'vsetvli x0, x0, vtype' which preserves
/// VL and only sets VTYPE.
static bool isVLPreservingConfig(const MachineInstr &MI) {
if (MI.getOpcode() != RISCV::PseudoVSETVLIX0)
return false;
assert(RISCV::X0 == MI.getOperand(1).getReg());
return RISCV::X0 == MI.getOperand(0).getReg();
}

static bool isFloatScalarMoveOrScalarSplatInstr(const MachineInstr &MI) {
switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
default:
return false;
case RISCV::VFMV_S_F:
case RISCV::VFMV_V_F:
return true;
}
}

static bool isScalarExtractInstr(const MachineInstr &MI) {
switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
default:
return false;
case RISCV::VMV_X_S:
case RISCV::VFMV_F_S:
return true;
}
}

static bool isScalarInsertInstr(const MachineInstr &MI) {
switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
default:
return false;
case RISCV::VMV_S_X:
case RISCV::VFMV_S_F:
return true;
}
}

static bool isScalarSplatInstr(const MachineInstr &MI) {
switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
default:
return false;
case RISCV::VMV_V_I:
case RISCV::VMV_V_X:
case RISCV::VFMV_V_F:
return true;
}
}

static bool isVSlideInstr(const MachineInstr &MI) {
switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
default:
return false;
case RISCV::VSLIDEDOWN_VX:
case RISCV::VSLIDEDOWN_VI:
case RISCV::VSLIDEUP_VX:
case RISCV::VSLIDEUP_VI:
return true;
}
}

/// Get the EEW for a load or store instruction. Return std::nullopt if MI is
/// not a load or store which ignores SEW.
static std::optional<unsigned> getEEWForLoadStore(const MachineInstr &MI) {
@@ -166,13 +98,6 @@ static std::optional<unsigned> getEEWForLoadStore(const MachineInstr &MI) {
}
}

static bool isNonZeroLoadImmediate(const MachineInstr &MI) {
return MI.getOpcode() == RISCV::ADDI &&
MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
MI.getOperand(1).getReg() == RISCV::X0 &&
MI.getOperand(2).getImm() != 0;
}

/// Return true if this is an operation on mask registers. Note that
/// this includes both arithmetic/logical ops and load/store (vlm/vsm).
static bool isMaskRegOp(const MachineInstr &MI) {
@@ -458,7 +383,7 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
}

// For vmv.s.x and vfmv.s.f, there are only two behaviors, VL = 0 and VL > 0.
if (isScalarInsertInstr(MI)) {
if (RISCVInstrInfo::isScalarInsertInstr(MI)) {
Res.LMUL = DemandedFields::LMULNone;
Res.SEWLMULRatio = false;
Res.VLAny = false;
@@ -469,7 +394,8 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
// tail lanes to either be the original value or -1. We are writing
// unknown bits to the lanes here.
if (hasUndefinedPassthru(MI)) {
if (isFloatScalarMoveOrScalarSplatInstr(MI) && !ST->hasVInstructionsF64())
if (RISCVInstrInfo::isFloatScalarMoveOrScalarSplatInstr(MI) &&
!ST->hasVInstructionsF64())
Res.SEW = DemandedFields::SEWGreaterThanOrEqualAndLessThan64;
else
Res.SEW = DemandedFields::SEWGreaterThanOrEqual;
@@ -478,7 +404,7 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
}

// vmv.x.s, and vfmv.f.s are unconditional and ignore everything except SEW.
if (isScalarExtractInstr(MI)) {
if (RISCVInstrInfo::isScalarExtractInstr(MI)) {
assert(!RISCVII::hasVLOp(TSFlags));
Res.LMUL = DemandedFields::LMULNone;
Res.SEWLMULRatio = false;
@@ -496,8 +422,8 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
// non-zero VL. We could generalize this if we had a VL > C predicate.
// * The LMUL1 restriction is for machines whose latency may depend on VL.
// * As above, this is only legal for tail "undefined" not "agnostic".
if (isVSlideInstr(MI) && VLOp.isImm() && VLOp.getImm() == 1 &&
hasUndefinedPassthru(MI)) {
if (RISCVInstrInfo::isVSlideInstr(MI) && VLOp.isImm() &&
VLOp.getImm() == 1 && hasUndefinedPassthru(MI)) {
Res.VLAny = false;
Res.VLZeroness = true;
Res.LMUL = DemandedFields::LMULLessThanOrEqualToM1;
@@ -510,12 +436,13 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
// its place. Since a splat is non-constant time in LMUL, we do need to be
// careful to not increase the number of active vector registers (unlike for
// vmv.s.x.)
if (isScalarSplatInstr(MI) && VLOp.isImm() && VLOp.getImm() == 1 &&
hasUndefinedPassthru(MI)) {
if (RISCVInstrInfo::isScalarSplatInstr(MI) && VLOp.isImm() &&
VLOp.getImm() == 1 && hasUndefinedPassthru(MI)) {
Res.LMUL = DemandedFields::LMULLessThanOrEqualToM1;
Res.SEWLMULRatio = false;
Res.VLAny = false;
if (isFloatScalarMoveOrScalarSplatInstr(MI) && !ST->hasVInstructionsF64())
if (RISCVInstrInfo::isFloatScalarMoveOrScalarSplatInstr(MI) &&
!ST->hasVInstructionsF64())
Res.SEW = DemandedFields::SEWGreaterThanOrEqualAndLessThan64;
else
Res.SEW = DemandedFields::SEWGreaterThanOrEqual;
@@ -651,7 +578,7 @@ class VSETVLIInfo {
return getAVLImm() > 0;
if (hasAVLReg()) {
if (auto *DefMI = getAVLDefMI(LIS))
return isNonZeroLoadImmediate(*DefMI);
return RISCVInstrInfo::isNonZeroLoadImmediate(*DefMI);
}
if (hasAVLVLMAX())
return true;
@@ -979,7 +906,7 @@ void RISCVInsertVSETVLI::forwardVSETVLIAVL(VSETVLIInfo &Info) const {
if (!Info.hasAVLReg())
return;
const MachineInstr *DefMI = Info.getAVLDefMI(LIS);
if (!DefMI || !isVectorConfigInstr(*DefMI))
if (!DefMI || !RISCVInstrInfo::isVectorConfigInstr(*DefMI))
return;
VSETVLIInfo DefInstrInfo = getInfoForVSETVLI(*DefMI);
if (!DefInstrInfo.hasSameVLMAX(Info))
@@ -1085,7 +1012,7 @@ RISCVInsertVSETVLI::computeInfoForInstr(const MachineInstr &MI) const {
InstrInfo.setAVLRegDef(VNI, VLOp.getReg());
}
} else {
assert(isScalarExtractInstr(MI));
assert(RISCVInstrInfo::isScalarExtractInstr(MI));
// Pick a random value for state tracking purposes, will be ignored via
// the demanded fields mechanism
InstrInfo.setAVLImm(1);
@@ -1126,7 +1053,7 @@ void RISCVInsertVSETVLI::insertVSETVLI(MachineBasicBlock &MBB,
// same, we can use the X0, X0 form.
if (Info.hasSameVLMAX(PrevInfo) && Info.hasAVLReg()) {
if (const MachineInstr *DefMI = Info.getAVLDefMI(LIS);
DefMI && isVectorConfigInstr(*DefMI)) {
DefMI && RISCVInstrInfo::isVectorConfigInstr(*DefMI)) {
VSETVLIInfo DefInfo = getInfoForVSETVLI(*DefMI);
if (DefInfo.hasSameAVL(PrevInfo) && DefInfo.hasSameVLMAX(PrevInfo)) {
auto MI = BuildMI(MBB, InsertPt, DL, TII->get(RISCV::PseudoVSETVLIX0))
@@ -1304,7 +1231,7 @@ void RISCVInsertVSETVLI::transferBefore(VSETVLIInfo &Info,
// reflect the changes MI might make.
void RISCVInsertVSETVLI::transferAfter(VSETVLIInfo &Info,
const MachineInstr &MI) const {
if (isVectorConfigInstr(MI)) {
if (RISCVInstrInfo::isVectorConfigInstr(MI)) {
Info = getInfoForVSETVLI(MI);
return;
}
@@ -1339,7 +1266,8 @@ bool RISCVInsertVSETVLI::computeVLVTYPEChanges(const MachineBasicBlock &MBB,
for (const MachineInstr &MI : MBB) {
transferBefore(Info, MI);

if (isVectorConfigInstr(MI) || RISCVII::hasSEWOp(MI.getDesc().TSFlags) ||
if (RISCVInstrInfo::isVectorConfigInstr(MI) ||
RISCVII::hasSEWOp(MI.getDesc().TSFlags) ||
isVectorCopy(ST->getRegisterInfo(), MI))
HadVectorOp = true;

@@ -1429,7 +1357,7 @@ bool RISCVInsertVSETVLI::needVSETVLIPHI(const VSETVLIInfo &Require,
if (!Value)
return true;
MachineInstr *DefMI = LIS->getInstructionFromIndex(Value->def);
if (!DefMI || !isVectorConfigInstr(*DefMI))
if (!DefMI || !RISCVInstrInfo::isVectorConfigInstr(*DefMI))
return true;

// We found a VSET(I)VLI make sure it matches the output of the
@@ -1460,7 +1388,7 @@ void RISCVInsertVSETVLI::emitVSETVLIs(MachineBasicBlock &MBB) {
transferBefore(CurInfo, MI);

// If this is an explicit VSETVLI or VSETIVLI, update our state.
if (isVectorConfigInstr(MI)) {
if (RISCVInstrInfo::isVectorConfigInstr(MI)) {
// Conservatively, mark the VL and VTYPE as live.
assert(MI.getOperand(3).getReg() == RISCV::VL &&
MI.getOperand(4).getReg() == RISCV::VTYPE &&
@@ -1667,12 +1595,12 @@ bool RISCVInsertVSETVLI::canMutatePriorConfig(
// If the VL values aren't equal, return false if either a) the former is
// demanded, or b) we can't rewrite the former to be the later for
// implementation reasons.
if (!isVLPreservingConfig(MI)) {
if (!RISCVInstrInfo::isVLPreservingConfig(MI)) {
if (Used.VLAny)
return false;

if (Used.VLZeroness) {
if (isVLPreservingConfig(PrevMI))
if (RISCVInstrInfo::isVLPreservingConfig(PrevMI))
return false;
if (!getInfoForVSETVLI(PrevMI).hasEquallyZeroAVL(getInfoForVSETVLI(MI),
LIS))
@@ -1723,7 +1651,7 @@ void RISCVInsertVSETVLI::coalesceVSETVLIs(MachineBasicBlock &MBB) const {

for (MachineInstr &MI : make_early_inc_range(reverse(MBB))) {

if (!isVectorConfigInstr(MI)) {
if (!RISCVInstrInfo::isVectorConfigInstr(MI)) {
Used.doUnion(getDemanded(MI, ST));
if (MI.isCall() || MI.isInlineAsm() ||
MI.modifiesRegister(RISCV::VL, /*TRI=*/nullptr) ||
@@ -1747,7 +1675,7 @@ void RISCVInsertVSETVLI::coalesceVSETVLIs(MachineBasicBlock &MBB) const {
}

if (canMutatePriorConfig(MI, *NextMI, Used)) {
if (!isVLPreservingConfig(*NextMI)) {
if (!RISCVInstrInfo::isVLPreservingConfig(*NextMI)) {
Register DefReg = NextMI->getOperand(0).getReg();

MI.getOperand(0).setReg(DefReg);
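Several helpers removed from RISCVInsertVSETVLI.cpp check operands as well as opcodes; the generic combinators CheckAll, CheckRegOperand, CheckImmOperand and CheckNot cover that. Two hedged sketches of what such definitions could look like follow (assumed names; the isVLPreservingConfig sketch also drops the assert on operand 1 that the original function carried):

// Sketch: 'vsetvli x0, x0, vtype', i.e. PseudoVSETVLIX0 writing X0.
def IsVLPreservingConfig
    : TIIPredicate<"isVLPreservingConfig",
                   MCReturnStatement<
                       CheckAll<[CheckOpcode<[PseudoVSETVLIX0]>,
                                 CheckRegOperand<0, X0>]>>>;

// Sketch: 'addi rd, x0, imm' with a non-zero immediate.
def IsNonZeroLoadImmediate
    : TIIPredicate<"isNonZeroLoadImmediate",
                   MCReturnStatement<
                       CheckAll<[CheckOpcode<[ADDI]>,
                                 CheckRegOperand<1, X0>,
                                 CheckNot<CheckImmOperand<2, 0>>]>>>;

The predicates that key off RISCV::getRVVMCOpcode (isScalarExtractInstr, isScalarInsertInstr, isScalarSplatInstr, isVSlideInstr, isFloatScalarMoveOrScalarSplatInstr) cannot be expressed as a plain CheckOpcode over the MC opcodes, since the MIR uses pseudo instructions; presumably the real definitions enumerate the pseudo variants or rely on another mechanism, so no sketch is attempted for them here.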
15 changes: 3 additions & 12 deletions llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
@@ -27,6 +27,7 @@

#include "MCTargetDesc/RISCVBaseInfo.h"
#include "RISCV.h"
#include "RISCVInstrInfo.h"
#include "RISCVSubtarget.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include <queue>
@@ -227,23 +228,13 @@ char RISCVInsertWriteVXRM::ID = 0;
INITIALIZE_PASS(RISCVInsertWriteVXRM, DEBUG_TYPE, RISCV_INSERT_WRITE_VXRM_NAME,
false, false)

static bool ignoresVXRM(const MachineInstr &MI) {
switch (RISCV::getRVVMCOpcode(MI.getOpcode())) {
default:
return false;
case RISCV::VNCLIP_WI:
case RISCV::VNCLIPU_WI:
return MI.getOperand(3).getImm() == 0;
}
}

bool RISCVInsertWriteVXRM::computeVXRMChanges(const MachineBasicBlock &MBB) {
BlockData &BBInfo = BlockInfo[MBB.getNumber()];

bool NeedVXRMWrite = false;
for (const MachineInstr &MI : MBB) {
int VXRMIdx = RISCVII::getVXRMOpNum(MI.getDesc());
if (VXRMIdx >= 0 && !ignoresVXRM(MI)) {
if (VXRMIdx >= 0 && !RISCVInstrInfo::ignoresVXRM(MI)) {
unsigned NewVXRMImm = MI.getOperand(VXRMIdx).getImm();

if (!BBInfo.VXRMUse.isValid())
@@ -401,7 +392,7 @@ void RISCVInsertWriteVXRM::emitWriteVXRM(MachineBasicBlock &MBB) {

for (MachineInstr &MI : MBB) {
int VXRMIdx = RISCVII::getVXRMOpNum(MI.getDesc());
if (VXRMIdx >= 0 && !ignoresVXRM(MI)) {
if (VXRMIdx >= 0 && !RISCVInstrInfo::ignoresVXRM(MI)) {
unsigned NewVXRMImm = MI.getOperand(VXRMIdx).getImm();

if (PendingInsert || !Info.isStatic() ||
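ignoresVXRM() is the one removed helper whose result depends on an operand for only some opcodes, which maps naturally onto an MCOpcodeSwitchStatement. A hypothetical sketch of the shape follows; note that the original keys off RISCV::getRVVMCOpcode, so the real definition has to cover the corresponding pseudo opcodes rather than the bare VNCLIP(U)_WI instructions named below:

// Sketch only: vnclip(u).wi with a shift immediate of 0 ignores VXRM.
def IgnoresVXRM
    : TIIPredicate<"ignoresVXRM",
                   MCOpcodeSwitchStatement<
                       [MCOpcodeSwitchCase<
                            [VNCLIP_WI, VNCLIPU_WI],
                            MCReturnStatement<CheckImmOperand<3, 0>>>],
                       MCReturnStatement<FalsePred>>>;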
27 changes: 6 additions & 21 deletions llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -84,6 +84,9 @@ RISCVInstrInfo::RISCVInstrInfo(RISCVSubtarget &STI)
: RISCVGenInstrInfo(RISCV::ADJCALLSTACKDOWN, RISCV::ADJCALLSTACKUP),
STI(STI) {}

#define GET_INSTRINFO_HELPERS
#include "RISCVGenInstrInfo.inc"

MCInst RISCVInstrInfo::getNop() const {
if (STI.hasStdExtCOrZca())
return MCInstBuilder(RISCV::C_NOP);
@@ -835,11 +838,11 @@ std::optional<unsigned> getFoldedOpcode(MachineFunction &MF, MachineInstr &MI,

switch (MI.getOpcode()) {
default:
if (RISCV::isSEXT_W(MI))
if (RISCVInstrInfo::isSEXT_W(MI))
return RISCV::LW;
if (RISCV::isZEXT_W(MI))
if (RISCVInstrInfo::isZEXT_W(MI))
return RISCV::LWU;
if (RISCV::isZEXT_B(MI))
if (RISCVInstrInfo::isZEXT_B(MI))
return RISCV::LBU;
break;
case RISCV::SEXT_H:
@@ -4169,24 +4172,6 @@ unsigned RISCVInstrInfo::getTailDuplicateSize(CodeGenOptLevel OptLevel) const {
: 2;
}

// Returns true if this is the sext.w pattern, addiw rd, rs1, 0.
bool RISCV::isSEXT_W(const MachineInstr &MI) {
return MI.getOpcode() == RISCV::ADDIW && MI.getOperand(1).isReg() &&
MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0;
}

// Returns true if this is the zext.w pattern, adduw rd, rs1, x0.
bool RISCV::isZEXT_W(const MachineInstr &MI) {
return MI.getOpcode() == RISCV::ADD_UW && MI.getOperand(1).isReg() &&
MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0;
}

// Returns true if this is the zext.b pattern, andi rd, rs1, 255.
bool RISCV::isZEXT_B(const MachineInstr &MI) {
return MI.getOpcode() == RISCV::ANDI && MI.getOperand(1).isReg() &&
MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 255;
}

bool RISCV::isRVVSpill(const MachineInstr &MI) {
// RVV lacks any support for immediate addressing for stack addresses, so be
// conservative.
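Finally, the RISCV::isSEXT_W/isZEXT_W/isZEXT_B helpers deleted above are pure opcode-plus-operand checks and are now reached through RISCVInstrInfo (see the RISCVInstrInfo::isSEXT_W callers earlier in this file), with the bodies pulled in by the GET_INSTRINFO_HELPERS include added near the top. A hedged sketch of one of them in the same assumed TableGen style:

// Sketch: sext.w is 'addiw rd, rs1, 0'.
def IsSEXT_W
    : TIIPredicate<"isSEXT_W",
                   MCReturnStatement<
                       CheckAll<[CheckOpcode<[ADDIW]>,
                                 CheckIsRegOperand<1>,
                                 CheckImmOperand<2, 0>]>>>;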