[APInt] Fix APInt constructions where value does not fit bitwidth (NFCI) #80309

Merged · 7 commits · Oct 17, 2024
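As background for the hunks below, here is a minimal standalone sketch of what "value does not fit bitwidth" means for the `APInt(numBits, val, isSigned, implicitTrunc)` constructor used throughout this patch. The concrete values are illustrative only, and the default of `implicitTrunc` depends on the revision of APInt.h in the tree.

    #include "llvm/ADT/APInt.h"
    #include <cassert>
    #include <cstdint>

    int main() {
      // Fits: 255 is representable in 8 unsigned bits.
      llvm::APInt A(8, 255);
      assert(A.getZExtValue() == 255);

      // Fits as a signed value: isSigned=true tells the constructor the upper
      // bits of the uint64_t are sign bits, so -1 is in range for 8 bits.
      llvm::APInt B(8, uint64_t(-1), /*isSigned=*/true);
      assert(B.getSExtValue() == -1);

      // Does not fit: 256 needs 9 bits. Callers that really want the low bits
      // must opt in to truncation explicitly, which is what most hunks in this
      // patch do.
      llvm::APInt C(8, 256, /*isSigned=*/false, /*implicitTrunc=*/true);
      assert(C.getZExtValue() == 0);
      return 0;
    }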
2 changes: 1 addition & 1 deletion clang/include/clang/Sema/Sema.h
@@ -6755,7 +6755,7 @@ class Sema final : public SemaBase {
 
   ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedIdentKind IK);
   ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
-  ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
+  ExprResult ActOnIntegerConstant(SourceLocation Loc, int64_t Val);
 
   bool CheckLoopHintExpr(Expr *E, SourceLocation Loc, bool AllowZero);
 
7 changes: 5 additions & 2 deletions clang/lib/AST/ByteCode/IntegralAP.h
@@ -61,7 +61,7 @@ template <bool Signed> class IntegralAP final {
 
   IntegralAP(APInt V) : V(V) {}
   /// Arbitrary value for uninitialized variables.
-  IntegralAP() : IntegralAP(-1, 3) {}
+  IntegralAP() : IntegralAP(Signed ? -1 : 7, 3) {}
 
   IntegralAP operator-() const { return IntegralAP(-V); }
   IntegralAP operator-(const IntegralAP &Other) const {
@@ -112,7 +112,10 @@ template <bool Signed> class IntegralAP final {
 
   template <unsigned Bits, bool InputSigned>
   static IntegralAP from(Integral<Bits, InputSigned> I, unsigned BitWidth) {
-    APInt Copy = APInt(BitWidth, static_cast<uint64_t>(I), InputSigned);
+    // TODO: Avoid implicit trunc?
+    // See https://github.com/llvm/llvm-project/issues/112510.
+    APInt Copy = APInt(BitWidth, static_cast<uint64_t>(I), InputSigned,
+                       /*implicitTrunc=*/true);
 
     return IntegralAP<Signed>(Copy);
  }
5 changes: 3 additions & 2 deletions clang/lib/CodeGen/CGVTT.cpp
@@ -85,8 +85,9 @@ CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT,
           cast<llvm::StructType>(VTable->getValueType())
               ->getElementType(AddressPoint.VTableIndex));
       unsigned Offset = ComponentSize * AddressPoint.AddressPointIndex;
-      llvm::ConstantRange InRange(llvm::APInt(32, -Offset, true),
-                                  llvm::APInt(32, VTableSize - Offset, true));
+      llvm::ConstantRange InRange(
+          llvm::APInt(32, (int)-Offset, true),
+          llvm::APInt(32, (int)(VTableSize - Offset), true));
       llvm::Constant *Init = llvm::ConstantExpr::getGetElementPtr(
           VTable->getValueType(), VTable, Idxs, /*InBounds=*/true, InRange);
 
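A standalone sketch (with a hypothetical Offset of 8 and assuming two's-complement int) of why this vtable hunk and the ItaniumCXXABI hunk below cast to int before building a signed 32-bit APInt: negating an unsigned Offset wraps, and the wrapped value no longer fits a signed 32-bit interpretation.

    #include <cassert>
    #include <cstdint>

    int main() {
      unsigned Offset = 8;

      // Without the cast, -Offset stays unsigned: it wraps to 4294967288 and
      // zero-extends when widened, so it no longer reads as -8.
      uint64_t AsUnsigned = -Offset;
      assert(AsUnsigned == 4294967288u);

      // With the cast, the value is sign-extended and still reads as -8, which
      // fits a 32-bit APInt constructed with isSigned=true.
      int64_t AsSigned = (int)-Offset;
      assert(AsSigned == -8);
      return 0;
    }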
5 changes: 3 additions & 2 deletions clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -2099,8 +2099,9 @@ ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
   unsigned VTableSize =
       ComponentSize * Layout.getVTableSize(AddressPoint.VTableIndex);
   unsigned Offset = ComponentSize * AddressPoint.AddressPointIndex;
-  llvm::ConstantRange InRange(llvm::APInt(32, -Offset, true),
-                              llvm::APInt(32, VTableSize - Offset, true));
+  llvm::ConstantRange InRange(
+      llvm::APInt(32, (int)-Offset, true),
+      llvm::APInt(32, (int)(VTableSize - Offset), true));
   return llvm::ConstantExpr::getGetElementPtr(
       VTable->getValueType(), VTable, Indices, /*InBounds=*/true, InRange);
 }
6 changes: 3 additions & 3 deletions clang/lib/Parse/ParseInit.cpp
@@ -436,9 +436,9 @@ ExprResult Parser::createEmbedExpr() {
   ASTContext &Context = Actions.getASTContext();
   SourceLocation StartLoc = ConsumeAnnotationToken();
   if (Data->BinaryData.size() == 1) {
-    Res = IntegerLiteral::Create(Context,
-                                 llvm::APInt(CHAR_BIT, Data->BinaryData.back()),
-                                 Context.UnsignedCharTy, StartLoc);
+    Res = IntegerLiteral::Create(
+        Context, llvm::APInt(CHAR_BIT, (unsigned char)Data->BinaryData.back()),
+        Context.UnsignedCharTy, StartLoc);
   } else {
     auto CreateStringLiteralFromStringRef = [&](StringRef Str, QualType Ty) {
       llvm::APSInt ArraySize =
5 changes: 3 additions & 2 deletions clang/lib/Sema/SemaExpr.cpp
@@ -3598,9 +3598,10 @@ ExprResult Sema::ActOnCharacterConstant(const Token &Tok, Scope *UDLScope) {
                                Lit, Tok.getLocation());
 }
 
-ExprResult Sema::ActOnIntegerConstant(SourceLocation Loc, uint64_t Val) {
+ExprResult Sema::ActOnIntegerConstant(SourceLocation Loc, int64_t Val) {
   unsigned IntSize = Context.getTargetInfo().getIntWidth();
-  return IntegerLiteral::Create(Context, llvm::APInt(IntSize, Val),
+  return IntegerLiteral::Create(Context,
+                                llvm::APInt(IntSize, Val, /*isSigned=*/true),
                                 Context.IntTy, Loc);
 }
 
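A small sketch of why ActOnIntegerConstant now takes int64_t and builds the APInt with isSigned=true; the 32-bit int width is an assumption about the target.

    #include "llvm/ADT/APInt.h"
    #include <cassert>
    #include <cstdint>

    int main() {
      unsigned IntWidth = 32;  // hypothetical Context.getTargetInfo().getIntWidth()
      int64_t Val = -1;        // a negative value passed by a caller

      // Signed construction: -1 is in range for a 32-bit signed value.
      llvm::APInt Lit(IntWidth, Val, /*isSigned=*/true);
      assert(Lit.getSExtValue() == -1);

      // The old uint64_t/unsigned form would have seen 0xFFFFFFFFFFFFFFFF,
      // which does not fit in 32 unsigned bits without truncation.
      return 0;
    }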
4 changes: 3 additions & 1 deletion clang/lib/Sema/SemaOpenMP.cpp
@@ -5697,7 +5697,9 @@ StmtResult SemaOpenMP::ActOnOpenMPCanonicalLoop(Stmt *AStmt) {
       llvm_unreachable("unhandled unary increment operator");
     }
     Step = IntegerLiteral::Create(
-        Ctx, llvm::APInt(Ctx.getIntWidth(LogicalTy), Direction), LogicalTy, {});
+        Ctx,
+        llvm::APInt(Ctx.getIntWidth(LogicalTy), Direction, /*isSigned=*/true),
+        LogicalTy, {});
   } else if (auto *IncBin = dyn_cast<BinaryOperator>(Inc)) {
     if (IncBin->getOpcode() == BO_AddAssign) {
       Step = IncBin->getRHS();
8 changes: 5 additions & 3 deletions lldb/source/Expression/DWARFExpression.cpp
@@ -860,10 +860,12 @@ llvm::Expected<Value> DWARFExpression::Evaluate(
   // TODO: Implement a real typed stack, and store the genericness of the value
   // there.
   auto to_generic = [&](auto v) {
+    // TODO: Avoid implicit trunc?
+    // See https://github.com/llvm/llvm-project/issues/112510.
     bool is_signed = std::is_signed<decltype(v)>::value;
-    return Scalar(llvm::APSInt(
-        llvm::APInt(8 * opcodes.GetAddressByteSize(), v, is_signed),
-        !is_signed));
+    return Scalar(llvm::APSInt(llvm::APInt(8 * opcodes.GetAddressByteSize(), v,
+                                           is_signed, /*implicitTrunc=*/true),
+                               !is_signed));
   };
 
   // The default kind is a memory location. This is updated by any
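A standalone approximation of the to_generic lambda (returning llvm::APSInt rather than lldb's Scalar, with a hypothetical 4-byte address size), showing why the construction now opts into implicitTrunc when the pushed value is wider than the DWARF stack's address size.

    #include "llvm/ADT/APSInt.h"
    #include <cassert>
    #include <cstdint>
    #include <type_traits>

    template <typename T>
    llvm::APSInt to_generic(T v, unsigned AddrByteSize) {
      bool is_signed = std::is_signed<T>::value;
      return llvm::APSInt(
          llvm::APInt(8 * AddrByteSize, v, is_signed, /*implicitTrunc=*/true),
          /*isUnsigned=*/!is_signed);
    }

    int main() {
      // A 64-bit constant pushed on a 32-bit stack keeps only its low 32 bits.
      llvm::APSInt V = to_generic(uint64_t(0x100000001ULL), 4);
      assert(V.getZExtValue() == 1);
      return 0;
    }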
4 changes: 3 additions & 1 deletion llvm/include/llvm/ADT/APFixedPoint.h
@@ -168,7 +168,9 @@ class APFixedPoint {
   }
 
   APFixedPoint(uint64_t Val, const FixedPointSemantics &Sema)
-      : APFixedPoint(APInt(Sema.getWidth(), Val, Sema.isSigned()), Sema) {}
+      : APFixedPoint(APInt(Sema.getWidth(), Val, Sema.isSigned(),
+                           /*implicitTrunc=*/true),
+                     Sema) {}
 
   // Zero initialization.
   APFixedPoint(const FixedPointSemantics &Sema) : APFixedPoint(0, Sema) {}
3 changes: 2 additions & 1 deletion llvm/lib/Analysis/ConstantFolding.cpp
@@ -888,7 +888,8 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
   APInt Offset = APInt(
       BitWidth,
       DL.getIndexedOffsetInType(
-          SrcElemTy, ArrayRef((Value *const *)Ops.data() + 1, Ops.size() - 1)));
+          SrcElemTy, ArrayRef((Value *const *)Ops.data() + 1, Ops.size() - 1)),
+      /*isSigned=*/true, /*implicitTrunc=*/true);
 
   std::optional<ConstantRange> InRange = GEP->getInRange();
   if (InRange)
6 changes: 2 additions & 4 deletions llvm/lib/Analysis/Loads.cpp
@@ -95,10 +95,8 @@ static bool isDereferenceableAndAlignedPointer(
 
   auto IsKnownDeref = [&]() {
     bool CheckForNonNull, CheckForFreed;
-    APInt KnownDerefBytes(Size.getBitWidth(),
-                          V->getPointerDereferenceableBytes(DL, CheckForNonNull,
-                                                            CheckForFreed));
-    if (!KnownDerefBytes.getBoolValue() || !KnownDerefBytes.uge(Size) ||
+    if (!Size.ule(V->getPointerDereferenceableBytes(DL, CheckForNonNull,
+                                                    CheckForFreed)) ||
         CheckForFreed)
       return false;
     if (CheckForNonNull &&
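The Loads.cpp hunk replaces a temporary APInt with a direct comparison; a minimal sketch of the APInt::ule(uint64_t) overload it relies on, with made-up sizes.

    #include "llvm/ADT/APInt.h"
    #include <cassert>
    #include <cstdint>

    int main() {
      llvm::APInt Size(64, 16);     // bytes the access needs
      uint64_t DerefBytes = 32;     // bytes known dereferenceable

      // ule has a uint64_t overload, so no intermediate APInt (and no chance
      // of truncating a large byte count at a narrow bit width) is needed.
      assert(Size.ule(DerefBytes));
      assert(!Size.ule(uint64_t(0)));  // zero deref bytes only satisfies Size == 0
      return 0;
    }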
2 changes: 2 additions & 0 deletions llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -767,6 +767,8 @@ SizeOffsetAPInt ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) {
   TypeSize ElemSize = DL.getTypeAllocSize(I.getAllocatedType());
   if (ElemSize.isScalable() && Options.EvalMode != ObjectSizeOpts::Mode::Min)
     return ObjectSizeOffsetVisitor::unknown();
+  if (!isUIntN(IntTyBits, ElemSize.getKnownMinValue()))
+    return ObjectSizeOffsetVisitor::unknown();
   APInt Size(IntTyBits, ElemSize.getKnownMinValue());
   if (!I.isArrayAllocation())
     return SizeOffsetAPInt(align(Size, I.getAlign()), Zero);
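A short sketch of the new guard: isUIntN checks that a 64-bit quantity is representable in the narrower index width before an APInt of that width is built; the widths and sizes are illustrative.

    #include "llvm/Support/MathExtras.h"
    #include <cassert>
    #include <cstdint>

    int main() {
      unsigned IntTyBits = 32;            // hypothetical index width
      uint64_t SmallElem = 1024;          // fits in 32 bits
      uint64_t HugeElem = 1ULL << 40;     // does not fit in 32 bits

      assert(llvm::isUIntN(IntTyBits, SmallElem));  // safe: APInt(32, SmallElem)
      assert(!llvm::isUIntN(IntTyBits, HugeElem));  // bail out with unknown()
      return 0;
    }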
2 changes: 1 addition & 1 deletion llvm/lib/Analysis/ScalarEvolution.cpp
@@ -6883,7 +6883,7 @@ const ConstantRange &ScalarEvolution::getRangeRef(
       bool CanBeNull, CanBeFreed;
       uint64_t DerefBytes =
           V->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
-      if (DerefBytes > 1) {
+      if (DerefBytes > 1 && isUIntN(BitWidth, DerefBytes)) {
         // The highest address the object can start is DerefBytes bytes before
         // the end (unsigned max value). If this value is not a multiple of the
         // alignment, the last possible start value is the next lowest multiple
3 changes: 2 additions & 1 deletion llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -876,7 +876,8 @@ class BitcodeReader : public BitcodeReaderBase, public GVMaterializer {
     } else {
       int64_t Start = BitcodeReader::decodeSignRotatedValue(Record[OpNum++]);
       int64_t End = BitcodeReader::decodeSignRotatedValue(Record[OpNum++]);
-      return ConstantRange(APInt(BitWidth, Start), APInt(BitWidth, End));
+      return ConstantRange(APInt(BitWidth, Start, true),
+                           APInt(BitWidth, End, true));
     }
   }
 
6 changes: 5 additions & 1 deletion llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -1641,7 +1641,11 @@ SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
   assert((EltVT.getSizeInBits() >= 64 ||
           (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
          "getConstant with a uint64_t value that doesn't fit in the type!");
-  return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
+  // TODO: Avoid implicit trunc?
+  // See https://github.com/llvm/llvm-project/issues/112510.
+  return getConstant(APInt(EltVT.getSizeInBits(), Val, /*isSigned=*/false,
+                           /*implicitTrunc=*/true),
+                     DL, VT, isT, isO);
 }
 
 SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
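The existing assert above already guarantees Val is a zero- or sign-extension of the element width, which is why the implicit truncation here stays benign; a standalone restatement of that check (the helper name is mine):

    #include <cassert>
    #include <cstdint>

    // After an arithmetic shift by the element width, the remaining bits must
    // be all zeros (zero-extended value) or all ones (sign-extended value).
    bool fitsElement(uint64_t Val, unsigned EltBits) {
      if (EltBits >= 64)
        return true;
      return (uint64_t)((int64_t)Val >> EltBits) + 1 < 2;
    }

    int main() {
      assert(fitsElement(0x00000000000000FFULL, 8));  // zero-extended 8-bit value
      assert(fitsElement(0xFFFFFFFFFFFFFF80ULL, 8));  // sign-extended -128
      assert(!fitsElement(0x0000000000000100ULL, 8)); // needs 9 bits
      return 0;
    }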
3 changes: 2 additions & 1 deletion llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4333,7 +4333,8 @@ void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
           GTI.getSequentialElementStride(DAG.getDataLayout());
       // We intentionally mask away the high bits here; ElementSize may not
       // fit in IdxTy.
-      APInt ElementMul(IdxSize, ElementSize.getKnownMinValue());
+      APInt ElementMul(IdxSize, ElementSize.getKnownMinValue(),
+                       /*isSigned=*/false, /*implicitTrunc=*/true);
       bool ElementScalable = ElementSize.isScalable();
 
       // If this is a scalar constant or a splat vector of constants,
10 changes: 8 additions & 2 deletions llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -2200,7 +2200,10 @@ ScheduleDAGSDNodes *SelectionDAGISel::CreateScheduler() {
 bool SelectionDAGISel::CheckAndMask(SDValue LHS, ConstantSDNode *RHS,
                                     int64_t DesiredMaskS) const {
   const APInt &ActualMask = RHS->getAPIntValue();
-  const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS);
+  // TODO: Avoid implicit trunc?
+  // See https://github.com/llvm/llvm-project/issues/112510.
+  const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS,
+                                   /*isSigned=*/false, /*implicitTrunc=*/true);
 
   // If the actual mask exactly matches, success!
   if (ActualMask == DesiredMask)
@@ -2229,7 +2232,10 @@ bool SelectionDAGISel::CheckAndMask(SDValue LHS, ConstantSDNode *RHS,
 bool SelectionDAGISel::CheckOrMask(SDValue LHS, ConstantSDNode *RHS,
                                    int64_t DesiredMaskS) const {
   const APInt &ActualMask = RHS->getAPIntValue();
-  const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS);
+  // TODO: Avoid implicit trunc?
+  // See https://github.com/llvm/llvm-project/issues/112510.
+  const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS,
+                                   /*isSigned=*/false, /*implicitTrunc=*/true);
 
   // If the actual mask exactly matches, success!
   if (ActualMask == DesiredMask)
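A sketch of why CheckAndMask/CheckOrMask need implicitTrunc: an all-bits int64_t mask has set bits far above a narrow value type, and only the low bits of the value width survive; the 16-bit width is made up.

    #include "llvm/ADT/APInt.h"
    #include <cassert>
    #include <cstdint>

    int main() {
      unsigned ValueBits = 16;    // hypothetical LHS.getValueSizeInBits()
      int64_t DesiredMaskS = -1;  // "all bits of the value"

      llvm::APInt DesiredMask(ValueBits, DesiredMaskS, /*isSigned=*/false,
                              /*implicitTrunc=*/true);
      assert(DesiredMask.isAllOnes());  // only the low 16 bits remain: 0xFFFF
      return 0;
    }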
8 changes: 6 additions & 2 deletions llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -6813,7 +6813,9 @@ TargetLowering::prepareUREMEqFold(EVT SETCCVT, SDValue REMNode,
 
     PAmts.push_back(DAG.getConstant(P, DL, SVT));
     KAmts.push_back(
-        DAG.getConstant(APInt(ShSVT.getSizeInBits(), K), DL, ShSVT));
+        DAG.getConstant(APInt(ShSVT.getSizeInBits(), K, /*isSigned=*/false,
+                              /*implicitTrunc=*/true),
+                        DL, ShSVT));
     QAmts.push_back(DAG.getConstant(Q, DL, SVT));
     return true;
   };
@@ -7084,7 +7086,9 @@ TargetLowering::prepareSREMEqFold(EVT SETCCVT, SDValue REMNode,
     PAmts.push_back(DAG.getConstant(P, DL, SVT));
     AAmts.push_back(DAG.getConstant(A, DL, SVT));
     KAmts.push_back(
-        DAG.getConstant(APInt(ShSVT.getSizeInBits(), K), DL, ShSVT));
+        DAG.getConstant(APInt(ShSVT.getSizeInBits(), K, /*isSigned=*/false,
+                              /*implicitTrunc=*/true),
+                        DL, ShSVT));
     QAmts.push_back(DAG.getConstant(Q, DL, SVT));
     return true;
   };
2 changes: 1 addition & 1 deletion llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp
@@ -588,7 +588,7 @@ GenericValue MCJIT::runFunction(Function *F, ArrayRef<GenericValue> ArgValues) {
       return rv;
     }
   case Type::VoidTyID:
-    rv.IntVal = APInt(32, ((int(*)())(intptr_t)FPtr)());
+    rv.IntVal = APInt(32, ((int (*)())(intptr_t)FPtr)(), true);
     return rv;
   case Type::FloatTyID:
     rv.FloatVal = ((float(*)())(intptr_t)FPtr)();
5 changes: 4 additions & 1 deletion llvm/lib/IR/Constants.cpp
@@ -932,7 +932,10 @@ Constant *ConstantInt::get(Type *Ty, uint64_t V, bool isSigned) {
 }
 
 ConstantInt *ConstantInt::get(IntegerType *Ty, uint64_t V, bool isSigned) {
-  return get(Ty->getContext(), APInt(Ty->getBitWidth(), V, isSigned));
+  // TODO: Avoid implicit trunc?
+  // See https://github.com/llvm/llvm-project/issues/112510.
+  return get(Ty->getContext(),
+             APInt(Ty->getBitWidth(), V, isSigned, /*implicitTrunc=*/true));
 }
 
 Constant *ConstantInt::get(Type *Ty, const APInt& V) {
32 changes: 16 additions & 16 deletions llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -2400,10 +2400,11 @@ void AArch64TargetLowering::computeKnownBitsForTargetNode(
   }
   case AArch64ISD::BICi: {
     // Compute the bit cleared value.
-    uint64_t Mask =
-        ~(Op->getConstantOperandVal(1) << Op->getConstantOperandVal(2));
+    APInt Mask =
+        ~(Op->getConstantOperandAPInt(1) << Op->getConstantOperandAPInt(2))
+             .trunc(Known.getBitWidth());
     Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
-    Known &= KnownBits::makeConstant(APInt(Known.getBitWidth(), Mask));
+    Known &= KnownBits::makeConstant(Mask);
     break;
   }
   case AArch64ISD::VLSHR: {
@@ -12839,7 +12840,8 @@ static bool isEXTMask(ArrayRef<int> M, EVT VT, bool &ReverseEXT,
   // Benefit form APInt to handle overflow when calculating expected element.
   unsigned NumElts = VT.getVectorNumElements();
   unsigned MaskBits = APInt(32, NumElts * 2).logBase2();
-  APInt ExpectedElt = APInt(MaskBits, *FirstRealElt + 1);
+  APInt ExpectedElt = APInt(MaskBits, *FirstRealElt + 1, /*isSigned=*/false,
+                            /*implicitTrunc=*/true);
   // The following shuffle indices must be the successive elements after the
   // first real element.
   bool FoundWrongElt = std::any_of(FirstRealElt + 1, M.end(), [&](int Elt) {
@@ -14306,9 +14308,9 @@ static SDValue NormalizeBuildVector(SDValue Op,
       // (with operands cast to integers), then the only possibilities
       // are constants and UNDEFs.
       if (auto *CstLane = dyn_cast<ConstantSDNode>(Lane)) {
-        APInt LowBits(EltTy.getSizeInBits(),
-                      CstLane->getZExtValue());
-        Lane = DAG.getConstant(LowBits.getZExtValue(), dl, MVT::i32);
+        Lane = DAG.getConstant(
+            CstLane->getAPIntValue().trunc(EltTy.getSizeInBits()).getZExtValue(),
+            dl, MVT::i32);
       } else if (Lane.getNode()->isUndef()) {
         Lane = DAG.getUNDEF(MVT::i32);
       } else {
@@ -23713,7 +23715,7 @@ static bool findMoreOptimalIndexType(const MaskedGatherScatterSDNode *N,
   EVT NewIndexVT = IndexVT.changeVectorElementType(MVT::i32);
   // Stride does not scale explicitly by 'Scale', because it happens in
   // the gather/scatter addressing mode.
-  Index = DAG.getStepVector(SDLoc(N), NewIndexVT, APInt(32, Stride));
+  Index = DAG.getStepVector(SDLoc(N), NewIndexVT, APInt(32, Stride, true));
   return true;
 }
 
@@ -28729,7 +28731,7 @@ static SDValue GenerateFixedLengthSVETBL(SDValue Op, SDValue Op1, SDValue Op2,
   unsigned BitsPerElt = VTOp1.getVectorElementType().getSizeInBits();
   unsigned IndexLen = MinSVESize / BitsPerElt;
   unsigned ElementsPerVectorReg = VTOp1.getVectorNumElements();
-  uint64_t MaxOffset = APInt(BitsPerElt, -1, false).getZExtValue();
+  uint64_t MaxOffset = APInt(BitsPerElt, -1, true).getZExtValue();
   EVT MaskEltType = VTOp1.getVectorElementType().changeTypeToInteger();
   EVT MaskType = EVT::getVectorVT(*DAG.getContext(), MaskEltType, IndexLen);
   bool MinMaxEqual = (MinSVESize == MaxSVESize);
@@ -29087,16 +29089,14 @@ bool AArch64TargetLowering::SimplifyDemandedBitsForTargetNode(
     KnownBits KnownOp0 =
         TLO.DAG.computeKnownBits(Op0, OriginalDemandedElts, Depth + 1);
     // Op0 &= ~(ConstantOperandVal(1) << ConstantOperandVal(2))
-    uint64_t BitsToClear = Op->getConstantOperandVal(1)
-                           << Op->getConstantOperandVal(2);
+    APInt BitsToClear =
+        (Op->getConstantOperandAPInt(1) << Op->getConstantOperandAPInt(2))
+            .trunc(KnownOp0.getBitWidth());
     APInt AlreadyZeroedBitsToClear = BitsToClear & KnownOp0.Zero;
-    if (APInt(Known.getBitWidth(), BitsToClear)
-            .isSubsetOf(AlreadyZeroedBitsToClear))
+    if (BitsToClear.isSubsetOf(AlreadyZeroedBitsToClear))
       return TLO.CombineTo(Op, Op0);
 
-    Known = KnownOp0 &
-            KnownBits::makeConstant(APInt(Known.getBitWidth(), ~BitsToClear));
-
+    Known = KnownOp0 & KnownBits::makeConstant(~BitsToClear);
     return false;
   }
   case ISD::INTRINSIC_WO_CHAIN: {
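The BICi hunks switch from uint64_t arithmetic plus a possibly-truncating APInt construction to doing the shift in APInt and truncating explicitly; a standalone sketch with made-up immediate and shift values.

    #include "llvm/ADT/APInt.h"
    #include <cassert>

    int main() {
      // Stand-ins for getConstantOperandAPInt(1) and (2): clear 0xFF << 8 in a
      // 32-bit element.
      llvm::APInt Imm(64, 0xFF);
      llvm::APInt Shift(64, 8);

      // Shift at the operands' width, truncate to the known-bits width, then
      // invert to get the mask of bits that survive BIC.
      llvm::APInt Mask = ~(Imm << Shift).trunc(32);
      assert(Mask == ~llvm::APInt(32, 0xFF00));
      return 0;
    }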
3 changes: 2 additions & 1 deletion llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -3441,7 +3441,8 @@ bool SIInstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
                                      : AMDGPU::V_MOV_B32_e32
                          : Is64Bit ? AMDGPU::S_MOV_B64_IMM_PSEUDO
                                    : AMDGPU::S_MOV_B32;
-    APInt Imm(Is64Bit ? 64 : 32, getImmFor(UseMI.getOperand(1)));
+    APInt Imm(Is64Bit ? 64 : 32, getImmFor(UseMI.getOperand(1)),
+              /*isSigned=*/true, /*implicitTrunc=*/true);
 
     if (RI.isAGPR(*MRI, DstReg)) {
       if (Is64Bit || !isInlineConstant(Imm))
4 changes: 2 additions & 2 deletions llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
@@ -213,12 +213,12 @@ static unsigned canModifyToInlineImmOp32(const SIInstrInfo *TII,
     // that SCC is not live as S_NOT_B32 clobbers it. It's probably not worth
     // it, as the reasonable values are already covered by s_movk_i32.
     ModifiedImm = ~SrcImm;
-    if (TII->isInlineConstant(APInt(32, ModifiedImm)))
+    if (TII->isInlineConstant(APInt(32, ModifiedImm, true)))
       return AMDGPU::V_NOT_B32_e32;
   }
 
   ModifiedImm = reverseBits<int32_t>(SrcImm);
-  if (TII->isInlineConstant(APInt(32, ModifiedImm)))
+  if (TII->isInlineConstant(APInt(32, ModifiedImm, true)))
     return Scalar ? AMDGPU::S_BREV_B32 : AMDGPU::V_BFREV_B32_e32;
 
   return 0;
3 changes: 2 additions & 1 deletion llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -1158,7 +1158,8 @@ class ARMOperand : public MCParsedAsmOperand {
   bool isFPImm() const {
     if (!isImm()) return false;
     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
-    if (!CE) return false;
+    if (!CE || !isUInt<32>(CE->getValue()))
+      return false;
     int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
     return Val != -1;
   }
3 changes: 2 additions & 1 deletion llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
@@ -2503,7 +2503,8 @@ APInt HexagonConstEvaluator::getCmpImm(unsigned Opc, unsigned OpX,
   }
 
   uint64_t Val = MO.getImm();
-  return APInt(32, Val, Signed);
+  // TODO: Is implicitTrunc correct here?
+  return APInt(32, Val, Signed, /*implicitTrunc=*/true);
 }
 
 void HexagonConstEvaluator::replaceWithNop(MachineInstr &MI) {
2 changes: 1 addition & 1 deletion llvm/lib/Target/Hexagon/HexagonGenExtract.cpp
@@ -171,7 +171,7 @@ bool HexagonGenExtract::convert(Instruction *In) {
     // this value.
     if (!LogicalSR && (SR > SL))
       return false;
-    APInt A = APInt(BW, ~0ULL).lshr(SR).shl(SL);
+    APInt A = APInt(BW, ~0ULL, true).lshr(SR).shl(SL);
     CM = ConstantInt::get(Ctx, A);
   }
 
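Why the HexagonGenExtract hunk marks ~0ULL as signed: for BW < 64 the unsigned value does not fit, but as a signed value it is just -1, which narrows to all ones at any width. A short sketch (the 16-bit width and shift amounts are made up), also showing the equivalent APInt::getAllOnes spelling:

    #include "llvm/ADT/APInt.h"
    #include <cassert>

    int main() {
      unsigned BW = 16;

      // ~0ULL does not fit 16 unsigned bits; as a signed value it is -1.
      llvm::APInt A = llvm::APInt(BW, ~0ULL, /*isSigned=*/true).lshr(4).shl(8);

      // Equivalent spelling using the dedicated all-ones helper.
      llvm::APInt B = llvm::APInt::getAllOnes(BW).lshr(4).shl(8);

      assert(A == B);
      return 0;
    }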