
[TTI] Constify BasicTTIImplBase::thisT() (NFCI) #136575

Merged · 3 commits · Apr 21, 2025
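For context, a minimal sketch of the CRTP dispatch this patch constifies (simplified, illustrative names only — not the actual LLVM headers): `BasicTTIImplBase<T>::thisT()` casts `this` to the concrete target implementation, so once `thisT()` returns a pointer-to-const, the target hooks it dispatches to (such as the AArch64 cost-model methods in the diffs below) must be marked `const` as well.

```cpp
// Minimal sketch of the CRTP dispatch being constified (simplified names,
// not the actual LLVM headers). The base class reaches the concrete target
// via thisT(); once thisT() is const, the hooks it calls must also be const.
#include <cassert>
#include <cstdint>

struct InstructionCost {
  int64_t Value;
};

template <typename T> class BasicTTIImplBaseSketch {
protected:
  // After the patch: thisT() is a const member returning a pointer-to-const.
  const T *thisT() const { return static_cast<const T *>(this); }

public:
  // A base-level helper that forwards to the (possibly shadowed) target hook.
  InstructionCost getIntImmCostOfMul(int64_t Imm) const {
    // Dispatch through the derived class; requires its hook to be const.
    return {thisT()->getIntImmCost(Imm).Value + 1};
  }

  // Default implementation a target may shadow.
  InstructionCost getIntImmCost(int64_t) const { return {1}; }
};

class TargetTTISketch : public BasicTTIImplBaseSketch<TargetTTISketch> {
public:
  // Target override; must now be const so thisT()->getIntImmCost() compiles.
  InstructionCost getIntImmCost(int64_t Imm) const {
    return {Imm == 0 ? 0 : 2};
  }
};

int main() {
  TargetTTISketch TTI;
  assert(TTI.getIntImmCostOfMul(0).Value == 1);  // dispatches to the override
  assert(TTI.getIntImmCostOfMul(42).Value == 3);
  return 0;
}
```
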
2 changes: 2 additions & 0 deletions llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -47,6 +47,8 @@ class TargetTransformInfoImplBase {

const DataLayout &getDataLayout() const { return DL; }

// FIXME: It looks like this implementation is dead. All clients appear to
// use the (non-const) version from `TargetTransformInfoImplCRTPBase`.
InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr,
ArrayRef<const Value *> Operands, Type *AccessType,
TTI::TargetCostKind CostKind) const {
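The shadowing the FIXME describes can be shown with a small sketch (illustrative class names, not the real ones): a same-named method declared in the CRTP layer hides the base-class overload, so ordinary member lookup on a concrete implementation never reaches the `const` base version.

```cpp
// Sketch of why the base-class overload looks dead (illustrative names only).
// A same-named method in the CRTP layer hides the base overload: unqualified
// member lookup on the concrete class stops at TTIImplCRTPBaseSketch, so
// TTIImplBaseSketch::getGEPCost is never selected by normal clients.
#include <iostream>

class TTIImplBaseSketch {
public:
  int getGEPCost() const { return 0; } // analogous to the const base version
};

template <typename T> class TTIImplCRTPBaseSketch : public TTIImplBaseSketch {
public:
  int getGEPCost() { return 1; } // hides the base overload for all clients
};

class ConcreteTTISketch : public TTIImplCRTPBaseSketch<ConcreteTTISketch> {};

int main() {
  ConcreteTTISketch TTI;
  std::cout << TTI.getGEPCost() << "\n"; // prints 1: the CRTP version wins
}
```
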
94 changes: 50 additions & 44 deletions llvm/include/llvm/CodeGen/BasicTTIImpl.h

Large diffs are not rendered by default.

47 changes: 24 additions & 23 deletions llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -371,7 +371,7 @@ bool AArch64TTIImpl::shouldMaximizeVectorBandwidth(
/// Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
InstructionCost AArch64TTIImpl::getIntImmCost(int64_t Val) {
InstructionCost AArch64TTIImpl::getIntImmCost(int64_t Val) const {
// Check if the immediate can be encoded within an instruction.
if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, 64))
return 0;
@@ -386,8 +386,9 @@ InstructionCost AArch64TTIImpl::getIntImmCost(int64_t Val) {
}

/// Calculate the cost of materializing the given constant.
InstructionCost AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
TTI::TargetCostKind CostKind) {
InstructionCost
AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
TTI::TargetCostKind CostKind) const {
assert(Ty->isIntegerTy());

unsigned BitSize = Ty->getPrimitiveSizeInBits();
@@ -577,7 +578,7 @@ static InstructionCost getHistogramCost(const IntrinsicCostAttributes &ICA) {

InstructionCost
AArch64TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
TTI::TargetCostKind CostKind) {
TTI::TargetCostKind CostKind) const {
// The code-generator is currently not able to handle scalable vectors
// of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
// it. This change will be removed when code-generation for these types is
@@ -2806,7 +2807,7 @@ AArch64TTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {

bool AArch64TTIImpl::isWideningInstruction(Type *DstTy, unsigned Opcode,
ArrayRef<const Value *> Args,
Type *SrcOverrideTy) {
Type *SrcOverrideTy) const {
// A helper that returns a vector type from the given type. The number of
// elements in type Ty determines the vector width.
auto toVectorTy = [&](Type *ArgTy) {
@@ -2903,7 +2904,7 @@ bool AArch64TTIImpl::isWideningInstruction(Type *DstTy, unsigned Opcode,
// trunc i16 (lshr (add %x, %y), 1) -> i8
//
bool AArch64TTIImpl::isExtPartOfAvgExpr(const Instruction *ExtUser, Type *Dst,
Type *Src) {
Type *Src) const {
// The source should be a legal vector type.
if (!Src->isVectorTy() || !TLI->isTypeLegal(TLI->getValueType(DL, Src)) ||
(Src->isScalableTy() && !ST->hasSVE2()))
@@ -2948,7 +2949,7 @@ InstructionCost AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
Type *Src,
TTI::CastContextHint CCH,
TTI::TargetCostKind CostKind,
const Instruction *I) {
const Instruction *I) const {
int ISD = TLI->InstructionOpcodeToISD(Opcode);
assert(ISD && "Invalid opcode");
// If the cast is observable, and it is used by a widening instruction (e.g.,
@@ -3619,7 +3620,7 @@ InstructionCost AArch64TTIImpl::getExtractWithExtendCost(unsigned Opcode,

InstructionCost AArch64TTIImpl::getCFInstrCost(unsigned Opcode,
TTI::TargetCostKind CostKind,
const Instruction *I) {
const Instruction *I) const {
if (CostKind != TTI::TCK_RecipThroughput)
return Opcode == Instruction::PHI ? 0 : 1;
assert(CostKind == TTI::TCK_RecipThroughput && "unexpected CostKind");
@@ -3630,7 +3631,7 @@ InstructionCost AArch64TTIImpl::getCFInstrCost(unsigned Opcode,
InstructionCost AArch64TTIImpl::getVectorInstrCostHelper(
unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index,
bool HasRealUse, const Instruction *I, Value *Scalar,
ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx) {
ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx) const {
assert(Val->isVectorTy() && "This must be a vector type");

if (Index != -1U) {
@@ -3802,7 +3803,7 @@ InstructionCost AArch64TTIImpl::getVectorInstrCostHelper(
InstructionCost AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
TTI::TargetCostKind CostKind,
unsigned Index, Value *Op0,
Value *Op1) {
Value *Op1) const {
bool HasRealUse =
Opcode == Instruction::InsertElement && Op0 && !isa<UndefValue>(Op0);
return getVectorInstrCostHelper(Opcode, Val, CostKind, Index, HasRealUse);
@@ -3826,7 +3827,7 @@ InstructionCost AArch64TTIImpl::getVectorInstrCost(const Instruction &I,

InstructionCost AArch64TTIImpl::getScalarizationOverhead(
VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract,
TTI::TargetCostKind CostKind, ArrayRef<Value *> VL) {
TTI::TargetCostKind CostKind, ArrayRef<Value *> VL) const {
if (isa<ScalableVectorType>(Ty))
return InstructionCost::getInvalid();
if (Ty->getElementType()->isFloatingPointTy())
@@ -3840,8 +3841,7 @@ InstructionCost AArch64TTIImpl::getScalarizationOverhead(
InstructionCost AArch64TTIImpl::getArithmeticInstrCost(
unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info,
ArrayRef<const Value *> Args,
const Instruction *CxtI) {
ArrayRef<const Value *> Args, const Instruction *CxtI) const {

// The code-generator is currently not able to handle scalable vectors
// of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
@@ -4171,7 +4171,7 @@ InstructionCost AArch64TTIImpl::getAddressComputationCost(Type *Ty,
InstructionCost AArch64TTIImpl::getCmpSelInstrCost(
unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info,
TTI::OperandValueInfo Op2Info, const Instruction *I) {
TTI::OperandValueInfo Op2Info, const Instruction *I) const {
// TODO: Handle other cost kinds.
if (CostKind != TTI::TCK_RecipThroughput)
return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
@@ -4284,7 +4284,7 @@ bool AArch64TTIImpl::prefersVectorizedAddressing() const {
InstructionCost
AArch64TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
Align Alignment, unsigned AddressSpace,
TTI::TargetCostKind CostKind) {
TTI::TargetCostKind CostKind) const {
if (useNeonVector(Src))
return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
CostKind);
@@ -4331,7 +4331,7 @@ static unsigned getSVEGatherScatterOverhead(unsigned Opcode,

InstructionCost AArch64TTIImpl::getGatherScatterOpCost(
unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) const {
if (useNeonVector(DataTy) || !isLegalMaskedGatherScatter(DataTy))
return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
Alignment, CostKind, I);
@@ -4371,7 +4371,7 @@ InstructionCost AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Ty,
unsigned AddressSpace,
TTI::TargetCostKind CostKind,
TTI::OperandValueInfo OpInfo,
const Instruction *I) {
const Instruction *I) const {
EVT VT = TLI->getValueType(DL, Ty, true);
// Type legalization can't handle structs
if (VT == MVT::Other)
@@ -4980,7 +4980,7 @@ bool AArch64TTIImpl::isLegalToVectorizeReduction(
InstructionCost
AArch64TTIImpl::getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
FastMathFlags FMF,
TTI::TargetCostKind CostKind) {
TTI::TargetCostKind CostKind) const {
// The code-generator is currently not able to handle scalable vectors
// of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
// it. This change will be removed when code-generation for these types is
@@ -5005,7 +5005,7 @@ AArch64TTIImpl::getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
}

InstructionCost AArch64TTIImpl::getArithmeticReductionCostSVE(
unsigned Opcode, VectorType *ValTy, TTI::TargetCostKind CostKind) {
unsigned Opcode, VectorType *ValTy, TTI::TargetCostKind CostKind) const {
std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
InstructionCost LegalizationCost = 0;
if (LT.first > 1) {
@@ -5032,7 +5032,7 @@ InstructionCost AArch64TTIImpl::getArithmeticReductionCostSVE(
InstructionCost
AArch64TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
std::optional<FastMathFlags> FMF,
TTI::TargetCostKind CostKind) {
TTI::TargetCostKind CostKind) const {
// The code-generator is currently not able to handle scalable vectors
// of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
// it. This change will be removed when code-generation for these types is
@@ -5207,8 +5207,9 @@ AArch64TTIImpl::getMulAccReductionCost(bool IsUnsigned, Type *ResTy,
return BaseT::getMulAccReductionCost(IsUnsigned, ResTy, VecTy, CostKind);
}

InstructionCost AArch64TTIImpl::getSpliceCost(VectorType *Tp, int Index,
TTI::TargetCostKind CostKind) {
InstructionCost
AArch64TTIImpl::getSpliceCost(VectorType *Tp, int Index,
TTI::TargetCostKind CostKind) const {
static const CostTblEntry ShuffleTbl[] = {
{ TTI::SK_Splice, MVT::nxv16i8, 1 },
{ TTI::SK_Splice, MVT::nxv8i16, 1 },
@@ -5340,7 +5341,7 @@ InstructionCost AArch64TTIImpl::getPartialReductionCost(
InstructionCost AArch64TTIImpl::getShuffleCost(
TTI::ShuffleKind Kind, VectorType *Tp, ArrayRef<int> Mask,
TTI::TargetCostKind CostKind, int Index, VectorType *SubTp,
ArrayRef<const Value *> Args, const Instruction *CxtI) {
ArrayRef<const Value *> Args, const Instruction *CxtI) const {
std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);

// If we have a Mask, and the LT is being legalized somehow, split the Mask
60 changes: 32 additions & 28 deletions llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
@@ -61,7 +61,7 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {

bool isWideningInstruction(Type *DstTy, unsigned Opcode,
ArrayRef<const Value *> Args,
Type *SrcOverrideTy = nullptr);
Type *SrcOverrideTy = nullptr) const;

// A helper function called by 'getVectorInstrCost'.
//
@@ -75,7 +75,7 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
InstructionCost getVectorInstrCostHelper(
unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index,
bool HasRealUse, const Instruction *I = nullptr, Value *Scalar = nullptr,
ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx = {});
ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx = {}) const;

public:
explicit AArch64TTIImpl(const AArch64TargetMachine *TM, const Function &F)
@@ -99,9 +99,9 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
/// @{

using BaseT::getIntImmCost;
InstructionCost getIntImmCost(int64_t Val);
InstructionCost getIntImmCost(int64_t Val) const;
InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
TTI::TargetCostKind CostKind);
TTI::TargetCostKind CostKind) const;
InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx,
const APInt &Imm, Type *Ty,
TTI::TargetCostKind CostKind,
@@ -131,7 +131,7 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
}

InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
TTI::TargetCostKind CostKind);
TTI::TargetCostKind CostKind) const;

std::optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
IntrinsicInst &II) const;
@@ -173,30 +173,32 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {

InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
Align Alignment, unsigned AddressSpace,
TTI::TargetCostKind CostKind);
TTI::TargetCostKind CostKind) const;

InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
const Value *Ptr, bool VariableMask,
Align Alignment,
TTI::TargetCostKind CostKind,
const Instruction *I = nullptr);
const Instruction *I = nullptr) const;

bool isExtPartOfAvgExpr(const Instruction *ExtUser, Type *Dst, Type *Src);
bool isExtPartOfAvgExpr(const Instruction *ExtUser, Type *Dst,
Type *Src) const;

InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
TTI::CastContextHint CCH,
TTI::TargetCostKind CostKind,
const Instruction *I = nullptr);
const Instruction *I = nullptr) const;

InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst,
VectorType *VecTy, unsigned Index);

InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
const Instruction *I = nullptr);
const Instruction *I = nullptr) const;

InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
TTI::TargetCostKind CostKind,
unsigned Index, Value *Op0, Value *Op1);
unsigned Index, Value *Op0,
Value *Op1) const;

/// \param ScalarUserAndIdx encodes the information about extracts from a
/// vector with 'Scalar' being the value being extracted,'User' being the user
@@ -213,20 +215,21 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {

InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
FastMathFlags FMF,
TTI::TargetCostKind CostKind);
TTI::TargetCostKind CostKind) const;

InstructionCost getArithmeticReductionCostSVE(unsigned Opcode,
VectorType *ValTy,
TTI::TargetCostKind CostKind);
InstructionCost
getArithmeticReductionCostSVE(unsigned Opcode, VectorType *ValTy,
TTI::TargetCostKind CostKind) const;

InstructionCost getSpliceCost(VectorType *Tp, int Index,
TTI::TargetCostKind CostKind);
TTI::TargetCostKind CostKind) const;

InstructionCost getArithmeticInstrCost(
unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
ArrayRef<const Value *> Args = {}, const Instruction *CxtI = nullptr);
ArrayRef<const Value *> Args = {},
const Instruction *CxtI = nullptr) const;

InstructionCost getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
const SCEV *Ptr);
@@ -236,17 +239,17 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
TTI::TargetCostKind CostKind,
TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
const Instruction *I = nullptr);
const Instruction *I = nullptr) const;

TTI::MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
bool IsZeroCmp) const;
bool useNeonVector(const Type *Ty) const;

InstructionCost
getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment,
unsigned AddressSpace, TTI::TargetCostKind CostKind,
TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
const Instruction *I = nullptr);
InstructionCost getMemoryOpCost(
unsigned Opcode, Type *Src, MaybeAlign Alignment, unsigned AddressSpace,
TTI::TargetCostKind CostKind,
TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
const Instruction *I = nullptr) const;

InstructionCost getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys);

@@ -423,9 +426,10 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
return ST->hasSVE();
}

InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
std::optional<FastMathFlags> FMF,
TTI::TargetCostKind CostKind);
InstructionCost
getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
std::optional<FastMathFlags> FMF,
TTI::TargetCostKind CostKind) const;

InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned,
Type *ResTy, VectorType *ValTy,
@@ -441,13 +445,13 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
TTI::TargetCostKind CostKind, int Index,
VectorType *SubTp,
ArrayRef<const Value *> Args = {},
const Instruction *CxtI = nullptr);
const Instruction *CxtI = nullptr) const;

InstructionCost getScalarizationOverhead(VectorType *Ty,
const APInt &DemandedElts,
bool Insert, bool Extract,
TTI::TargetCostKind CostKind,
ArrayRef<Value *> VL = {});
ArrayRef<Value *> VL = {}) const;

/// Return the cost of the scaling factor used in the addressing
/// mode represented by AM for this target, for a load/store