
Commit 628a084

JIT: don't import IL for partial compilation blocks (#61572)
Adjust partial compilation so that it does not import IL for patchpoint blocks at all, instead of importing the IL and then deleting the resulting IR. This saves some compile time and occasionally a bit of stack space, since we create fewer temps. Also update both partial compilation (PC) and OSR to check for blocks in handlers early, rather than pretending to support them and then backing out later.
Parent: d459c65
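In outline, the partial-compilation side of this change (see the src/coreclr/jit/importer.cpp diff below) reduces to the following condensed sketch; it paraphrases that hunk rather than reproducing it verbatim:

    // Condensed paraphrase of the new importer logic (full diff below).
    if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_PartialCompilation() > 0) &&
        compCanHavePatchpoints())
    {
        if ((block != fgFirstBB) && block->isRunRarely() && (verCurrentState.esStackDepth == 0) &&
            ((block->bbFlags & BBF_PATCHPOINT) == 0) && !block->hasHndIndex())
        {
            // Mark the block and turn it into a throw so its successors are never imported.
            block->bbFlags |= BBF_PARTIAL_COMPILATION_PATCHPOINT;
            setMethodHasPartialCompilationPatchpoint();
            block->bbJumpKind = BBJ_THROW;
            return; // skip importing this block's IL entirely
        }
    }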

File tree: 6 files changed (+102 / -66 lines)


src/coreclr/jit/compiler.cpp

Lines changed: 1 addition & 0 deletions
@@ -1873,6 +1873,7 @@ void Compiler::compInit(ArenaAllocator* pAlloc,
     compJmpOpUsed = false;
     compLongUsed = false;
     compTailCallUsed = false;
+    compLocallocSeen = false;
     compLocallocUsed = false;
     compLocallocOptimized = false;
     compQmarkRationalized = false;

src/coreclr/jit/compiler.h

Lines changed: 5 additions & 0 deletions
@@ -9377,6 +9377,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
     bool compLongUsed;          // Does the method use TYP_LONG
     bool compFloatingPointUsed; // Does the method use TYP_FLOAT or TYP_DOUBLE
     bool compTailCallUsed;      // Does the method do a tailcall
+    bool compLocallocSeen;      // Does the method IL have localloc opcode
     bool compLocallocUsed;      // Does the method use localloc.
     bool compLocallocOptimized; // Does the method have an optimized localloc
     bool compQmarkUsed;         // Does the method use GT_QMARK/GT_COLON

@@ -10192,6 +10193,10 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
         return !info.compInitMem && opts.compDbgCode;
     }

+    // Returns true if the jit supports having patchpoints in this method.
+    // Optionally, get the reason why not.
+    bool compCanHavePatchpoints(const char** reason = nullptr);
+
 #if defined(DEBUG)

     void compDispLocalVars();

src/coreclr/jit/compiler.hpp

Lines changed: 39 additions & 0 deletions
@@ -4717,6 +4717,45 @@ inline void LclVarDsc::setLvRefCntWtd(weight_t newValue, RefCountState state)
     m_lvRefCntWtd = newValue;
 }

+//------------------------------------------------------------------------------
+// compCanHavePatchpoints: return true if patchpoints are supported in this
+// method.
+//
+// Arguments:
+//    reason - [out, optional] reason why patchpoints are not supported
+//
+// Returns:
+//    True if patchpoints are supported in this method.
+//
+inline bool Compiler::compCanHavePatchpoints(const char** reason)
+{
+    const char* whyNot = nullptr;
+
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+    if (compLocallocSeen)
+    {
+        whyNot = "localloc";
+    }
+    else if ((info.compFlags & CORINFO_FLG_SYNCH) != 0)
+    {
+        whyNot = "synchronized method";
+    }
+    else if (opts.IsReversePInvoke())
+    {
+        whyNot = "reverse pinvoke";
+    }
+#else
+    whyNot = "OSR feature not defined in build";
+#endif
+
+    if (reason != nullptr)
+    {
+        *reason = whyNot;
+    }
+
+    return whyNot == nullptr;
+}
+
 /*****************************************************************************/
 #endif //_COMPILER_HPP_
 /*****************************************************************************/
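For orientation, a minimal usage sketch of the new helper's optional out-parameter (a hypothetical call site, not part of this commit; JITDUMP is the JIT's debug-only trace macro):

    // Hypothetical call site (sketch only): report why patchpoints are disabled.
    const char* reason = nullptr;
    if (!compCanHavePatchpoints(&reason))
    {
        JITDUMP("Patchpoints not supported: %s\n", reason);
    }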

src/coreclr/jit/fgbasic.cpp

Lines changed: 2 additions & 0 deletions
@@ -2015,6 +2015,8 @@ void Compiler::fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, Fixed

             case CEE_LOCALLOC:

+                compLocallocSeen = true;
+
                 // We now allow localloc callees to become candidates in some cases.
                 if (makeInlineObservations)
                 {

src/coreclr/jit/importer.cpp

Lines changed: 35 additions & 9 deletions
@@ -11593,16 +11593,21 @@ void Compiler::impImportBlockCode(BasicBlock* block)
     // Are there any places in the method where we might add a patchpoint?
     if (compHasBackwardJump)
     {
-        // Are patchpoints enabled?
-        if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_OnStackReplacement() > 0))
+        // Are patchpoints enabled and supported?
+        if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_OnStackReplacement() > 0) &&
+            compCanHavePatchpoints())
         {
             // We don't inline at Tier0; if we do, we may need to rethink our approach.
             // Could probably support inlines that don't introduce flow.
             assert(!compIsForInlining());

             // Is the start of this block a suitable patchpoint?
-            // Current strategy is blocks that are stack-empty and backwards branch targets
-            if (block->bbFlags & BBF_BACKWARD_JUMP_TARGET && (verCurrentState.esStackDepth == 0))
+            // Current strategy is blocks that are stack-empty and backwards branch targets and not in a handler
+            //
+            // Todo (perhaps): bail out of OSR and jit this method with optimization.
+            //
+            if (!block->hasHndIndex() && ((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) != 0) &&
+                (verCurrentState.esStackDepth == 0))
             {
                 block->bbFlags |= BBF_PATCHPOINT;
                 setMethodHasPatchpoint();

@@ -11627,12 +11632,33 @@ void Compiler::impImportBlockCode(BasicBlock* block)
     //
     // Todo: stress mode...
     //
-    if ((JitConfig.TC_PartialCompilation() > 0) && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) &&
-        (block != fgFirstBB) && block->isRunRarely() && (verCurrentState.esStackDepth == 0) &&
-        ((block->bbFlags & BBF_PATCHPOINT) == 0))
+    if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_PartialCompilation() > 0) &&
+        compCanHavePatchpoints())
     {
-        block->bbFlags |= BBF_PARTIAL_COMPILATION_PATCHPOINT;
-        setMethodHasPartialCompilationPatchpoint();
+        // Is this block a good place for partial compilation?
+        //
+        if ((block != fgFirstBB) && block->isRunRarely() && (verCurrentState.esStackDepth == 0) &&
+            ((block->bbFlags & BBF_PATCHPOINT) == 0) && !block->hasHndIndex())
+        {
+            JITDUMP("\nBlock " FMT_BB " will be a partial compilation patchpoint -- not importing\n", block->bbNum);
+            block->bbFlags |= BBF_PARTIAL_COMPILATION_PATCHPOINT;
+            setMethodHasPartialCompilationPatchpoint();
+
+            // Change block to BBJ_THROW so we won't trigger importation of successors.
+            //
+            block->bbJumpKind = BBJ_THROW;
+
+            // If this method has an explicit generic context, the only uses of it may be in
+            // the IL for this block. So assume it's used.
+            //
+            if (info.compMethodInfo->options &
+                (CORINFO_GENERICS_CTXT_FROM_METHODDESC | CORINFO_GENERICS_CTXT_FROM_METHODTABLE))
+            {
+                lvaGenericsContextInUse = true;
+            }
+
+            return;
+        }
     }

 #endif // FEATURE_ON_STACK_REPLACEMENT

src/coreclr/jit/patchpoint.cpp

Lines changed: 20 additions & 57 deletions
@@ -57,35 +57,33 @@ class PatchpointTransformer
         {
             if (block->bbFlags & BBF_PATCHPOINT)
             {
+                // We can't OSR from funclets.
+                //
+                assert(!block->hasHndIndex());
+
                 // Clear the patchpoint flag.
                 //
                 block->bbFlags &= ~BBF_PATCHPOINT;

-                // If block is in a handler region, don't insert a patchpoint.
-                // We can't OSR from funclets.
-                //
-                // TODO: check this earlier, somehow, and fall back to fully
-                // optimizing the method (ala QJFL=0).
-                if (compiler->ehGetBlockHndDsc(block) != nullptr)
-                {
-                    JITDUMP("Patchpoint: skipping patchpoint for " FMT_BB " as it is in a handler\n", block->bbNum);
-                    continue;
-                }
-
-                JITDUMP("Patchpoint: loop patchpoint in " FMT_BB "\n", block->bbNum);
-                assert(block != compiler->fgFirstBB);
+                JITDUMP("Patchpoint: regular patchpoint in " FMT_BB "\n", block->bbNum);
                 TransformBlock(block);
                 count++;
             }
             else if (block->bbFlags & BBF_PARTIAL_COMPILATION_PATCHPOINT)
             {
-                if (compiler->ehGetBlockHndDsc(block) != nullptr)
-                {
-                    JITDUMP("Patchpoint: skipping partial compilation patchpoint for " FMT_BB
-                            " as it is in a handler\n",
-                            block->bbNum);
-                    continue;
-                }
+                // We can't OSR from funclets.
+                // Also, we don't import the IL for these blocks.
+                //
+                assert(!block->hasHndIndex());
+
+                // If we're instrumenting, we should not have decided to
+                // put class probes here, as that is driven by looking at IL.
+                //
+                assert((block->bbFlags & BBF_HAS_CLASS_PROFILE) == 0);
+
+                // Clear the partial comp flag.
+                //
+                block->bbFlags &= ~BBF_PARTIAL_COMPILATION_PATCHPOINT;

                 JITDUMP("Patchpoint: partial compilation patchpoint in " FMT_BB "\n", block->bbNum);
                 TransformPartialCompilation(block);

@@ -248,15 +246,6 @@ class PatchpointTransformer
             compiler->gtNewHelperCallNode(CORINFO_HELP_PARTIAL_COMPILATION_PATCHPOINT, TYP_VOID, helperArgs);

         compiler->fgNewStmtAtEnd(block, helperCall);
-
-        // This block will no longer have class probes.
-        // (They will appear in the OSR variant).
-        //
-        if ((block->bbFlags & BBF_HAS_CLASS_PROFILE) != 0)
-        {
-            JITDUMP("No longer adding class probes to " FMT_BB "\n", block->bbNum);
-            block->bbFlags &= ~BBF_HAS_CLASS_PROFILE;
-        }
     }
 };

@@ -282,34 +271,8 @@ PhaseStatus Compiler::fgTransformPatchpoints()
     // We should only be adding patchpoints at Tier0, so should not be in an inlinee
     assert(!compIsForInlining());

-    // We currently can't do OSR in methods with localloc.
-    // Such methods don't have a fixed relationship between frame and stack pointers.
-    //
-    // This is true whether or not the localloc was executed in the original method.
-    //
-    // TODO: handle this case, or else check this earlier and fall back to fully
-    // optimizing the method (ala QJFL=0).
-    if (compLocallocUsed)
-    {
-        JITDUMP("\n -- unable to handle methods with localloc\n");
-        return PhaseStatus::MODIFIED_NOTHING;
-    }
-
-    // We currently can't do OSR in synchronized methods. We need to alter
-    // the logic in fgAddSyncMethodEnterExit for OSR to not try and obtain the
-    // monitor (since the original method will have done so) and set the monitor
-    // obtained flag to true (or reuse the original method slot value).
-    if ((info.compFlags & CORINFO_FLG_SYNCH) != 0)
-    {
-        JITDUMP("\n -- unable to handle synchronized methods\n");
-        return PhaseStatus::MODIFIED_NOTHING;
-    }
-
-    if (opts.IsReversePInvoke())
-    {
-        JITDUMP(" -- unable to handle Reverse P/Invoke\n");
-        return PhaseStatus::MODIFIED_NOTHING;
-    }
+    // We should be allowed to have patchpoints in this method.
+    assert(compCanHavePatchpoints());

     PatchpointTransformer ppTransformer(this);
     int count = ppTransformer.Run();
