Skip to content

Revert "[MemProf] Optionally save context size info on largest cold allocations" #142688

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Jun 3, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 1 addition & 18 deletions llvm/include/llvm/Analysis/MemoryProfileInfo.h
Original file line number Diff line number Diff line change
Expand Up @@ -24,18 +24,6 @@ class OptimizationRemarkEmitter;

namespace memprof {

/// Whether the alloc memeprof metadata will include context size info for all
/// MIBs.
LLVM_ABI bool metadataIncludesAllContextSizeInfo();

/// Whether the alloc memprof metadata may include context size info for some
/// MIBs (but possibly not all).
LLVM_ABI bool metadataMayIncludeContextSizeInfo();

/// Whether we need to record the context size info in the alloc trie used to
/// build metadata.
LLVM_ABI bool recordContextSizeInfoForAnalysis();

/// Build callstack metadata from the provided list of call stack ids. Returns
/// the resulting metadata node.
LLVM_ABI MDNode *buildCallstackMetadata(ArrayRef<uint64_t> CallStack,
Expand Down Expand Up @@ -99,9 +87,6 @@ class CallStackTrie {
// allocations for which we apply non-context sensitive allocation hints.
OptimizationRemarkEmitter *ORE;

// The maximum size of a cold allocation context, from the profile summary.
uint64_t MaxColdSize;

void deleteTrieNode(CallStackTrieNode *Node) {
if (!Node)
return;
Expand All @@ -128,9 +113,7 @@ class CallStackTrie {
uint64_t &ColdBytes);

public:
CallStackTrie(OptimizationRemarkEmitter *ORE = nullptr,
uint64_t MaxColdSize = 0)
: ORE(ORE), MaxColdSize(MaxColdSize) {}
CallStackTrie(OptimizationRemarkEmitter *ORE = nullptr) : ORE(ORE) {}
~CallStackTrie() { deleteTrieNode(Alloc); }

bool empty() const { return Alloc == nullptr; }
Expand Down
48 changes: 9 additions & 39 deletions llvm/lib/Analysis/MemoryProfileInfo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -46,25 +46,6 @@ cl::opt<unsigned> MinCallsiteColdBytePercent(
cl::desc("Min percent of cold bytes at a callsite to discard non-cold "
"contexts"));

// Enable saving context size information for largest cold contexts, which can
// be used to flag contexts for more aggressive cloning and reporting.
cl::opt<unsigned> MinPercentMaxColdSize(
"memprof-min-percent-max-cold-size", cl::init(100), cl::Hidden,
cl::desc("Min percent of max cold bytes for critical cold context"));

bool llvm::memprof::metadataIncludesAllContextSizeInfo() {
return MemProfReportHintedSizes || MinClonedColdBytePercent < 100;
}

bool llvm::memprof::metadataMayIncludeContextSizeInfo() {
return metadataIncludesAllContextSizeInfo() || MinPercentMaxColdSize < 100;
}

bool llvm::memprof::recordContextSizeInfoForAnalysis() {
return metadataMayIncludeContextSizeInfo() ||
MinCallsiteColdBytePercent < 100;
}

MDNode *llvm::memprof::buildCallstackMetadata(ArrayRef<uint64_t> CallStack,
LLVMContext &Ctx) {
SmallVector<Metadata *, 8> StackVals;
Expand Down Expand Up @@ -187,8 +168,7 @@ void CallStackTrie::addCallStack(MDNode *MIB) {
static MDNode *createMIBNode(LLVMContext &Ctx, ArrayRef<uint64_t> MIBCallStack,
AllocationType AllocType,
ArrayRef<ContextTotalSize> ContextSizeInfo,
const uint64_t MaxColdSize, uint64_t &TotalBytes,
uint64_t &ColdBytes) {
uint64_t &TotalBytes, uint64_t &ColdBytes) {
SmallVector<Metadata *> MIBPayload(
{buildCallstackMetadata(MIBCallStack, Ctx)});
MIBPayload.push_back(
Expand All @@ -204,21 +184,12 @@ static MDNode *createMIBNode(LLVMContext &Ctx, ArrayRef<uint64_t> MIBCallStack,

for (const auto &[FullStackId, TotalSize] : ContextSizeInfo) {
TotalBytes += TotalSize;
bool LargeColdContext = false;
if (AllocType == AllocationType::Cold) {
if (AllocType == AllocationType::Cold)
ColdBytes += TotalSize;
// If we have the max cold context size from summary information and have
// requested identification of contexts above a percentage of the max, see
// if this context qualifies.
if (MaxColdSize > 0 && MinPercentMaxColdSize < 100 &&
TotalSize * 100 >= MaxColdSize * MinPercentMaxColdSize)
LargeColdContext = true;
}
// Only add the context size info as metadata if we need it in the thin
// link (currently if reporting of hinted sizes is enabled, we have
// specified a threshold for marking allocations cold after cloning, or we
// have identified this as a large cold context of interest above).
if (metadataIncludesAllContextSizeInfo() || LargeColdContext) {
// link (currently if reporting of hinted sizes is enabled or we have
// specified a threshold for marking allocations cold after cloning).
if (MemProfReportHintedSizes || MinClonedColdBytePercent < 100) {
auto *FullStackIdMD = ValueAsMetadata::get(
ConstantInt::get(Type::getInt64Ty(Ctx), FullStackId));
auto *TotalSizeMD = ValueAsMetadata::get(
Expand Down Expand Up @@ -386,9 +357,9 @@ bool CallStackTrie::buildMIBNodes(CallStackTrieNode *Node, LLVMContext &Ctx,
if (hasSingleAllocType(Node->AllocTypes)) {
std::vector<ContextTotalSize> ContextSizeInfo;
collectContextSizeInfo(Node, ContextSizeInfo);
MIBNodes.push_back(
createMIBNode(Ctx, MIBCallStack, (AllocationType)Node->AllocTypes,
ContextSizeInfo, MaxColdSize, TotalBytes, ColdBytes));
MIBNodes.push_back(createMIBNode(Ctx, MIBCallStack,
(AllocationType)Node->AllocTypes,
ContextSizeInfo, TotalBytes, ColdBytes));
return true;
}

Expand Down Expand Up @@ -442,8 +413,7 @@ bool CallStackTrie::buildMIBNodes(CallStackTrieNode *Node, LLVMContext &Ctx,
std::vector<ContextTotalSize> ContextSizeInfo;
collectContextSizeInfo(Node, ContextSizeInfo);
MIBNodes.push_back(createMIBNode(Ctx, MIBCallStack, AllocationType::NotCold,
ContextSizeInfo, MaxColdSize, TotalBytes,
ColdBytes));
ContextSizeInfo, TotalBytes, ColdBytes));
return true;
}

Expand Down
25 changes: 3 additions & 22 deletions llvm/lib/Analysis/ModuleSummaryAnalysis.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -525,7 +525,6 @@ static void computeFunctionSummary(
if (MemProfMD) {
std::vector<MIBInfo> MIBs;
std::vector<std::vector<ContextTotalSize>> ContextSizeInfos;
bool HasNonZeroContextSizeInfos = false;
for (auto &MDOp : MemProfMD->operands()) {
auto *MIBMD = cast<const MDNode>(MDOp);
MDNode *StackNode = getMIBStackNode(MIBMD);
Expand All @@ -545,8 +544,7 @@ static void computeFunctionSummary(
}
// If we have context size information, collect it for inclusion in
// the summary.
assert(MIBMD->getNumOperands() > 2 ||
!metadataIncludesAllContextSizeInfo());
assert(MIBMD->getNumOperands() > 2 || !MemProfReportHintedSizes);
if (MIBMD->getNumOperands() > 2) {
std::vector<ContextTotalSize> ContextSizes;
for (unsigned I = 2; I < MIBMD->getNumOperands(); I++) {
Expand All @@ -560,31 +558,14 @@ static void computeFunctionSummary(
->getZExtValue();
ContextSizes.push_back({FullStackId, TS});
}
// Flag that we need to keep the ContextSizeInfos array for this
// alloc as it now contains non-zero context info sizes.
HasNonZeroContextSizeInfos = true;
ContextSizeInfos.push_back(std::move(ContextSizes));
} else {
// The ContextSizeInfos must be in the same relative position as the
// associated MIB. In some cases we only include a ContextSizeInfo
// for a subset of MIBs in an allocation. To handle that, eagerly
// fill any MIB entries that don't have context size info metadata
// with a pair of 0s. Later on we will only use this array if it
// ends up containing any non-zero entries (see where we set
// HasNonZeroContextSizeInfos above).
ContextSizeInfos.push_back({{0, 0}});
}
MIBs.push_back(
MIBInfo(getMIBAllocType(MIBMD), std::move(StackIdIndices)));
}
Allocs.push_back(AllocInfo(std::move(MIBs)));
assert(HasNonZeroContextSizeInfos ||
!metadataIncludesAllContextSizeInfo());
// We eagerly build the ContextSizeInfos array, but it will be filled
// with sub arrays of pairs of 0s if no MIBs on this alloc actually
// contained context size info metadata. Only save it if any MIBs had
// any such metadata.
if (HasNonZeroContextSizeInfos) {
assert(!ContextSizeInfos.empty() || !MemProfReportHintedSizes);
if (!ContextSizeInfos.empty()) {
assert(Allocs.back().MIBs.size() == ContextSizeInfos.size());
Allocs.back().ContextSizeInfos = std::move(ContextSizeInfos);
}
Expand Down
8 changes: 0 additions & 8 deletions llvm/lib/Bitcode/Reader/BitcodeReader.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -8164,14 +8164,6 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary(unsigned ID) {
ContextSizes.reserve(NumContextSizeInfoEntries);
for (unsigned J = 0; J < NumContextSizeInfoEntries; J++) {
assert(ContextIdIndex < PendingContextIds.size());
// Skip any 0 entries for MIBs without the context size info.
if (PendingContextIds[ContextIdIndex] == 0) {
// The size should also be 0 if the context was 0.
assert(!Record[I]);
ContextIdIndex++;
I++;
continue;
}
// PendingContextIds read from the preceding FS_ALLOC_CONTEXT_IDS
// should be in the same order as the total sizes.
ContextSizes.push_back(
Expand Down
26 changes: 8 additions & 18 deletions llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,6 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/MemoryProfileInfo.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/Bitcode/BitcodeCommon.h"
#include "llvm/Bitcode/BitcodeReader.h"
Expand Down Expand Up @@ -4586,23 +4585,14 @@ void ModuleBitcodeWriterBase::writePerModuleGlobalValueSummary() {
Stream.EmitRecord(bitc::FS_STACK_IDS, Vals, StackIdAbbvId);
}

unsigned ContextIdAbbvId = 0;
if (metadataMayIncludeContextSizeInfo()) {
// n x context id
auto ContextIdAbbv = std::make_shared<BitCodeAbbrev>();
ContextIdAbbv->Add(BitCodeAbbrevOp(bitc::FS_ALLOC_CONTEXT_IDS));
ContextIdAbbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
// The context ids are hashes that are close to 64 bits in size, so emitting
// as a pair of 32-bit fixed-width values is more efficient than a VBR if we
// are emitting them for all MIBs. Otherwise we use VBR to better compress 0
// values that are expected to more frequently occur in an alloc's memprof
// summary.
if (metadataIncludesAllContextSizeInfo())
ContextIdAbbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
else
ContextIdAbbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
ContextIdAbbvId = Stream.EmitAbbrev(std::move(ContextIdAbbv));
}
// n x context id
auto ContextIdAbbv = std::make_shared<BitCodeAbbrev>();
ContextIdAbbv->Add(BitCodeAbbrevOp(bitc::FS_ALLOC_CONTEXT_IDS));
ContextIdAbbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
// The context ids are hashes that are close to 64 bits in size, so emitting
// as a pair of 32-bit fixed-width values is more efficient than a VBR.
ContextIdAbbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
unsigned ContextIdAbbvId = Stream.EmitAbbrev(std::move(ContextIdAbbv));

// Abbrev for FS_PERMODULE_PROFILE.
Abbv = std::make_shared<BitCodeAbbrev>();
Expand Down
5 changes: 3 additions & 2 deletions llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2232,8 +2232,9 @@ IndexCallsiteContextGraph::IndexCallsiteContextGraph(
CallStack<MIBInfo, SmallVector<unsigned>::const_iterator>
EmptyContext;
unsigned I = 0;
assert(!metadataMayIncludeContextSizeInfo() ||
AN.ContextSizeInfos.size() == AN.MIBs.size());
assert(
(!MemProfReportHintedSizes && MinClonedColdBytePercent >= 100) ||
AN.ContextSizeInfos.size() == AN.MIBs.size());
// Now add all of the MIBs and their stack nodes.
for (auto &MIB : AN.MIBs) {
CallStack<MIBInfo, SmallVector<unsigned>::const_iterator>
Expand Down
25 changes: 16 additions & 9 deletions llvm/lib/Transforms/Instrumentation/MemProfiler.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -184,6 +184,10 @@ static cl::opt<bool> ClMemProfAttachCalleeGuids(
"Attach calleeguids as value profile metadata for indirect calls."),
cl::init(true), cl::Hidden);

extern cl::opt<bool> MemProfReportHintedSizes;
extern cl::opt<unsigned> MinClonedColdBytePercent;
extern cl::opt<unsigned> MinCallsiteColdBytePercent;

static cl::opt<unsigned> MinMatchedColdBytePercent(
"memprof-matching-cold-threshold", cl::init(100), cl::Hidden,
cl::desc("Min percent of cold bytes matched to hint allocation cold"));
Expand Down Expand Up @@ -295,6 +299,13 @@ class ModuleMemProfiler {
Function *MemProfCtorFunction = nullptr;
};

// Returns true when any of the memprof options is set that requires the
// per-context size information to be recorded in the allocation trie used
// to build metadata (hinted-size reporting, or a cold-byte percentage
// threshold for cloning or callsite trimming below 100).
bool recordContextSizeInfo() {
  if (MemProfReportHintedSizes)
    return true;
  if (MinClonedColdBytePercent < 100)
    return true;
  return MinCallsiteColdBytePercent < 100;
}

} // end anonymous namespace

MemProfilerPass::MemProfilerPass() = default;
Expand Down Expand Up @@ -747,7 +758,7 @@ static AllocationType addCallStack(CallStackTrie &AllocTrie,
AllocInfo->Info.getAllocCount(),
AllocInfo->Info.getTotalLifetime());
std::vector<ContextTotalSize> ContextSizeInfo;
if (recordContextSizeInfoForAnalysis()) {
if (recordContextSizeInfo()) {
auto TotalSize = AllocInfo->Info.getTotalSize();
assert(TotalSize);
assert(FullStackId != 0);
Expand Down Expand Up @@ -992,7 +1003,7 @@ static void readMemprof(Module &M, Function &F,
&FullStackIdToAllocMatchInfo,
std::set<std::vector<uint64_t>> &MatchedCallSites,
DenseMap<uint64_t, LocToLocMap> &UndriftMaps,
OptimizationRemarkEmitter &ORE, uint64_t MaxColdSize) {
OptimizationRemarkEmitter &ORE) {
auto &Ctx = M.getContext();
// Previously we used getIRPGOFuncName() here. If F is local linkage,
// getIRPGOFuncName() returns FuncName with prefix 'FileName;'. But
Expand Down Expand Up @@ -1181,7 +1192,7 @@ static void readMemprof(Module &M, Function &F,
// We may match this instruction's location list to multiple MIB
// contexts. Add them to a Trie specialized for trimming the contexts to
// the minimal needed to disambiguate contexts with unique behavior.
CallStackTrie AllocTrie(&ORE, MaxColdSize);
CallStackTrie AllocTrie(&ORE);
uint64_t TotalSize = 0;
uint64_t TotalColdSize = 0;
for (auto *AllocInfo : AllocInfoIter->second) {
Expand All @@ -1192,7 +1203,7 @@ static void readMemprof(Module &M, Function &F,
InlinedCallStack)) {
NumOfMemProfMatchedAllocContexts++;
uint64_t FullStackId = 0;
if (ClPrintMemProfMatchInfo || recordContextSizeInfoForAnalysis())
if (ClPrintMemProfMatchInfo || recordContextSizeInfo())
FullStackId = computeFullStackId(AllocInfo->CallStack);
auto AllocType = addCallStack(AllocTrie, AllocInfo, FullStackId);
TotalSize += AllocInfo->Info.getTotalSize();
Expand Down Expand Up @@ -1329,18 +1340,14 @@ PreservedAnalyses MemProfUsePass::run(Module &M, ModuleAnalysisManager &AM) {
// call stack.
std::set<std::vector<uint64_t>> MatchedCallSites;

uint64_t MaxColdSize = 0;
if (auto *MemProfSum = MemProfReader->getMemProfSummary())
MaxColdSize = MemProfSum->getMaxColdTotalSize();

for (auto &F : M) {
if (F.isDeclaration())
continue;

const TargetLibraryInfo &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(F);
readMemprof(M, F, MemProfReader.get(), TLI, FullStackIdToAllocMatchInfo,
MatchedCallSites, UndriftMaps, ORE, MaxColdSize);
MatchedCallSites, UndriftMaps, ORE);
}

if (ClPrintMemProfMatchInfo) {
Expand Down
73 changes: 0 additions & 73 deletions llvm/test/ThinLTO/X86/memprof-report-hinted-partial.ll

This file was deleted.

Loading
Loading