Skip to content

Commit

Permalink
JIT: Specialize fgFindJumpTargets (#103761)
Browse files Browse the repository at this point in the history
This specializes `fgFindJumpTargets` based on whether we are making
inline observations or not. This function has a lot of conditional logic
based on this property, but it is always going to be false in MinOpts,
so this allows the native compiler to optimize the function much better
(and avoids all the conditionals).

Using the VS profiler shows that the relative CPU time spent in the
function goes from 0.98% to 0.47% for the MinOpts contexts in
benchmarks.run_pgo.windows.x64.
  • Loading branch information
jakobbotsch authored Jun 20, 2024
1 parent 032b317 commit b0f3c19
Show file tree
Hide file tree
Showing 2 changed files with 20 additions and 9 deletions.
1 change: 1 addition & 0 deletions src/coreclr/jit/compiler.h
Original file line number Diff line number Diff line change
Expand Up @@ -6315,6 +6315,7 @@ class Compiler

bool fgMayExplicitTailCall();

template<bool makeInlineObservations>
void fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget);

void fgMarkBackwardJump(BasicBlock* startBlock, BasicBlock* endBlock);
Expand Down
28 changes: 19 additions & 9 deletions src/coreclr/jit/fgbasic.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1064,13 +1064,16 @@ class FgStack
//------------------------------------------------------------------------
// fgFindJumpTargets: walk the IL stream, determining jump target offsets
//
// Type arguments:
// makeInlineObservations - whether or not to record inline observations about the method
//
// Arguments:
// codeAddr - base address of the IL code buffer
// codeSize - number of bytes in the IL code buffer
// jumpTarget - [OUT] bit vector for flagging jump targets
//
// Notes:
// If inlining or prejitting the root, this method also makes
// If "makeInlineObservations" is true, this method also makes
// various observations about the method that factor into inline
// decisions.
//
Expand All @@ -1080,6 +1083,7 @@ class FgStack
//
// Also sets m_addrExposed and lvHasILStoreOp, ilHasMultipleILStoreOp in lvaTable[].
//
template <bool makeInlineObservations>
void Compiler::fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget)
{
const BYTE* codeBegp = codeAddr;
Expand All @@ -1088,13 +1092,12 @@ void Compiler::fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, Fixed
var_types varType = DUMMY_INIT(TYP_UNDEF); // TYP_ type
bool typeIsNormed = false;
FgStack pushedStack;
const bool isForceInline = (info.compFlags & CORINFO_FLG_FORCEINLINE) != 0;
const bool makeInlineObservations = (compInlineResult != nullptr);
const bool isInlining = compIsForInlining();
unsigned retBlocks = 0;
int prefixFlags = 0;
bool preciseScan = makeInlineObservations && compInlineResult->GetPolicy()->RequiresPreciseScan();
const bool resolveTokens = preciseScan;
const bool isForceInline = (info.compFlags & CORINFO_FLG_FORCEINLINE) != 0;
const bool isInlining = compIsForInlining();
unsigned retBlocks = 0;
int prefixFlags = 0;
bool preciseScan = makeInlineObservations && compInlineResult->GetPolicy()->RequiresPreciseScan();
const bool resolveTokens = preciseScan;

// Track offsets where IL instructions begin in DEBUG builds. Used to
// validate debug info generated by the JIT.
Expand Down Expand Up @@ -3614,7 +3617,14 @@ void Compiler::fgFindBasicBlocks()
FixedBitVect* jumpTarget = FixedBitVect::bitVectInit(info.compILCodeSize + 1, this);

// Walk the instrs to find all jump targets
fgFindJumpTargets(info.compCode, info.compILCodeSize, jumpTarget);
if (compInlineResult != nullptr)
{
fgFindJumpTargets<true>(info.compCode, info.compILCodeSize, jumpTarget);
}
else
{
fgFindJumpTargets<false>(info.compCode, info.compILCodeSize, jumpTarget);
}
if (compDonotInline())
{
return;
Expand Down

0 comments on commit b0f3c19

Please sign in to comment.