@@ -1924,7 +1924,7 @@ bool RISCVTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
1924 1924   // replace. If we don't support unaligned scalar mem, prefer the constant
1925 1925   // pool.
1926 1926   // TODO: Can the caller pass down the alignment?
1927      -   if (!Subtarget.hasFastUnalignedAccess())
     1927 +   if (!Subtarget.enableUnalignedScalarMem())
1928 1928     return true;
1929 1929
1930 1930   // Prefer to keep the load if it would require many instructions.
@@ -15837,7 +15837,7 @@ static bool matchIndexAsWiderOp(EVT VT, SDValue Index, SDValue Mask,
15837 15837   if (WiderElementSize > ST.getELen()/8)
15838 15838     return false;
15839 15839
15840       -   if (!ST.hasFastUnalignedAccess() && BaseAlign < WiderElementSize)
      15840 +   if (!ST.enableUnalignedVectorMem() && BaseAlign < WiderElementSize)
15841 15841     return false;
15842 15842
15843 15843   for (unsigned i = 0; i < Index->getNumOperands(); i++) {
@@ -20663,8 +20663,8 @@ bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
20663 20663     unsigned *Fast) const {
20664 20664   if (!VT.isVector()) {
20665 20665     if (Fast)
20666       -       *Fast = Subtarget.hasFastUnalignedAccess();
20667       -     return Subtarget.hasFastUnalignedAccess();
      20666 +       *Fast = Subtarget.enableUnalignedScalarMem();
      20667 +     return Subtarget.enableUnalignedScalarMem();
20668 20668   }
20669 20669
20670 20670   // All vector implementations must support element alignment
@@ -20680,8 +20680,8 @@ bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
20680 20680   // misaligned accesses. TODO: Work through the codegen implications of
20681 20681   // allowing such accesses to be formed, and considered fast.
20682 20682   if (Fast)
20683       -     *Fast = Subtarget.hasFastUnalignedAccess();
20684       -   return Subtarget.hasFastUnalignedAccess();
      20683 +     *Fast = Subtarget.enableUnalignedVectorMem();
      20684 +   return Subtarget.enableUnalignedVectorMem();
20685 20685 }
20686 20686
20687 20687
@@ -20716,7 +20716,7 @@ EVT RISCVTargetLowering::getOptimalMemOpType(const MemOp &Op,
20716 20716
20717 20717   // Do we have sufficient alignment for our preferred VT? If not, revert
20718 20718   // to largest size allowed by our alignment criteria.
20719       -   if (PreferredVT != MVT::i8 && !Subtarget.hasFastUnalignedAccess()) {
      20719 +   if (PreferredVT != MVT::i8 && !Subtarget.enableUnalignedVectorMem()) {
20720 20720     Align RequiredAlign(PreferredVT.getStoreSize());
20721 20721     if (Op.isFixedDstAlign())
20722 20722       RequiredAlign = std::min(RequiredAlign, Op.getDstAlign());
@@ -20908,7 +20908,7 @@ bool RISCVTargetLowering::isLegalStridedLoadStore(EVT DataType,
20908 20908   if (!isLegalElementTypeForRVV(ScalarType))
20909 20909     return false;
20910 20910
20911       -   if (!Subtarget.hasFastUnalignedAccess() &&
      20911 +   if (!Subtarget.enableUnalignedVectorMem() &&
20912 20912       Alignment < ScalarType.getStoreSize())
20913 20913     return false;
20914 20914
0 commit comments